| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
"""
Django settings for aestheticBlasphemy project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
from .custom_settings import *
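# The star import above is expected to provide at least SITE_URLS,
# DB_BASENAME, DB_NAME, DB_PASSWORD, TIME_ZONE_VALUE and STATIC_URL,
# all referenced below. A minimal sketch of such a module (hypothetical
# values only, not the project's actual secrets):
#
#     # custom_settings.py
#     SITE_URLS = ['localhost', '127.0.0.1']
#     DB_BASENAME = 'ab_db'            # database name
#     DB_NAME = 'ab_user'              # database account user
#     DB_PASSWORD = 'change-me'
#     TIME_ZONE_VALUE = 'Asia/Kolkata'
#     STATIC_URL = '/static/'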
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%7ohz4qftz(8@^kly*+l))7_8&e*0#$+!a1pqwu(2cg0qi&nqy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = SITE_URLS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Additional apps: contrib extras, third-party packages, and project apps
'django.contrib.humanize',
'django.contrib.sites',
'django.contrib.sitemaps',
'mptt',
#'treebeard',
'blogging',
'django_select2',
'crispy_forms',
'dashboard',
'meta_tags',
'django.contrib.redirects',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
'taggit',
'ckeditor',
'ckeditor_uploader',
'reversion',
'pl_messages',
'rest_framework',
'comments',
'django.contrib.flatpages',
'events',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'reversion.middleware.RevisionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]
ROOT_URLCONF = 'aestheticBlasphemy.urls'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_PATH, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'aestheticBlasphemy.context_processors.site_processor',
'aestheticBlasphemy.context_processors.getvars',
'django.template.context_processors.media',
'django.template.context_processors.csrf',
'pl_messages.context_processor.notifications'
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'aestheticBlasphemy.wsgi.application'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'NAME': DB_BASENAME,
'HOST': '127.0.0.1',
'USER': DB_NAME,
'PASSWORD': DB_PASSWORD,
'PORT': '3306',
'TIME_ZONE': 'Asia/Kolkata',
}
}
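# Note: this backend needs a MySQL driver importable as MySQLdb (for
# example the mysqlclient package). Despite the names, DB_BASENAME holds
# the database name and DB_NAME the account user (see the sketch of
# custom_settings above).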
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = TIME_ZONE_VALUE
USE_I18N = False
#USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
#STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATICFILES_DIRS = [
os.path.join(PROJECT_PATH, 'static'),
]
#MEDIA_ROOT = os.path.join(BASE_DIR,'media')
#MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = '/'
#
#LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(PROJECT_PATH, 'static/logs'),
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
#}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
EMAIL_SUBJECT_PREFIX = '[PirateLearner]'
EMAIL_HOST = 'smtp.webfaction.com'
EMAIL_HOST_USER = 'pirate_learner_mailbox'
EMAIL_HOST_PASSWORD = 'pirate@world'
SERVER_EMAIL = 'rai812@web379.webfaction.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 25
#Comments App settings
COMMENT_MODERATION_ENABLED = True
CKEDITOR_UPLOAD_PATH = 'images/'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': [
["Format", "Bold", "Italic", "Underline", "Strike", "Blockquote","Subscript", "Superscript", "SpellChecker"],
[ "Indent", "Outdent", 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock'],
["Image", "Table", "Link", "Unlink", "Anchor", "SectionLink",'NumberedList', 'BulletedList', 'HorizontalRule', 'CreateDiv'],
['Undo', 'Redo'], ["Source", 'RemoveFormat','Iframe'],["Maximize"],['ShowBlocks', 'Syntaxhighlight', 'Mathjax'],
],
'contentsCss': STATIC_URL+'css/bootstrap.css',
'codemirror' : {
# Set this to the theme you wish to use (codemirror themes)
'theme': 'default',
# Whether or not you want to show line numbers
'lineNumbers': 'true',
# Whether or not you want to use line wrapping
'lineWrapping': 'true',
# Whether or not you want to highlight matching braces
'matchBrackets': 'true',
# Whether or not you want tags to automatically close themselves
'autoCloseTags': 'false',
# Whether or not you want Brackets to automatically close themselves
'autoCloseBrackets': 'false',
# Whether or not to enable search tools, CTRL+F (Find), CTRL+SHIFT+F (Replace), CTRL+SHIFT+R (Replace All), CTRL+G (Find Next), CTRL+SHIFT+G (Find Previous)
'enableSearchTools': 'true',
# Whether or not you wish to enable code folding (requires 'lineNumbers' to be set to 'true')
'enableCodeFolding': 'true',
# Whether or not to enable code formatting
'enableCodeFormatting': 'false',
# Whether or not to automatically format code should be done when the editor is loaded
'autoFormatOnStart': 'false',
# Whether or not to automatically format code should be done every time the source view is opened
'autoFormatOnModeChange': 'false',
# Whether or not to automatically format code which has just been uncommented
'autoFormatOnUncomment': 'false',
# Define the language specific mode 'htmlmixed' for html including (css, xml, javascript), 'application/x-httpd-php' for php mode including html, or 'text/javascript' for using java script only
'mode': 'htmlmixed',
# Whether or not to show the search Code button on the toolbar
'showSearchButton': 'true',
# Whether or not to show Trailing Spaces
'showTrailingSpace': 'true',
# Whether or not to highlight all matches of current word/selection
'highlightMatches': 'true',
# Whether or not to show the format button on the toolbar
'showFormatButton': 'true',
# Whether or not to show the comment button on the toolbar
'showCommentButton': 'true',
# Whether or not to show the uncomment button on the toolbar
'showUncommentButton': 'true',
#Whether or not to show the showAutoCompleteButton button on the toolbar
'showAutoCompleteButton': 'true',
# Whether or not to highlight the currently active line
'styleActiveLine': 'true'
},
'disallowedContent':{
'p h1 h2 h3 h4 span blockquote':{
#Disallow setting font-family or font-size
'styles':['font*'],
},
},
'allowedContent':{
'*': {
'attributes': ['id', 'itemprop', 'title', 'placeholder', 'type', 'data-*'],
'classes':['text-center', 'text-left', 'text-right', 'text-justify', 'center-text', 'text-muted',
'align-center', 'pull-left', 'pull-right', 'center-block', 'media', 'image',
'list-unstyled', 'list-inline',
'language-*', '*',
],
},
'p': {
'attributes': ['id'],
},
'h1 h2 h3 h4 em i b strong caption h5 h6 u s br hr': 'true',
'a': {
'attributes': ['!href','target','name', 'id', 'name', 'rel'],
},
'img':{
#Do not allow image height and width styles
'attributes': ['!src', 'alt', 'id'],
},
'span ul ol li sup sub': 'true',
'div':{
'classes':'*',
},
'iframe':{
'classes':'*',
'attributes':'*',
},
'small abbr address footer section article dl dt dd kbd var samp form label input button textarea fieldset':'true',
'pre':{
'attributes': ['title'],
'classes':['*']
},
'code': 'true',
'blockquote':'true',
'table':'true',
'tr':'true',
'th':'true',
'td':'true',
},
'justifyClasses': ['text-left', 'text-center', 'text-right', 'text-justify'],
'extraPlugins': 'button,toolbar,codesnippet,about,stylescombo,richcombo,floatpanel,panel,button,listblock,dialog,dialogui,htmlwriter,removeformat,horizontalrule,widget,lineutils,mathjax,div,fakeobjects,iframe,image2,justify,blockquote,indent,indentlist,indentblock',
'ignoreEmptyParagraph': 'true',
'coreStyles_bold': {
'element': 'b',
'overrides': 'strong',
},
'coreStyles_italic':{
'element':'i',
'overrides':'em',
},
#'fillEmptyBlocks':'false',#Might need a callback fn
'image2_alignClasses':['pull-left','text-center','pull-right'],
'mathJaxClass':'math-tex',
'mathJaxLib':STATIC_URL+'js/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML',
'tabSpaces':'4',
'indentClasses': ['col-xs-offset-1', 'col-xs-offset-2', 'col-xs-offset-3', 'col-xs-offset-4'],
},
'author': {
'toolbar': [
["Format", "Bold", "Italic", "Underline", "Strike", "Blockquote","Subscript", "Superscript", "SpellChecker"],
[ "Indent", "Outdent", 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock'],
["Image", "Table", "Link", "Unlink", "Anchor", "SectionLink",'NumberedList', 'BulletedList', 'HorizontalRule', 'CreateDiv'],
['Undo', 'Redo'], ["Source", 'RemoveFormat','Iframe'],["Maximize"],['ShowBlocks', 'Syntaxhighlight', 'Mathjax'],
],
'contentsCss': STATIC_URL+'css/bootstrap.css',
'codemirror' : {
# Set this to the theme you wish to use (codemirror themes)
'theme': 'default',
# Whether or not you want to show line numbers
'lineNumbers': 'true',
# Whether or not you want to use line wrapping
'lineWrapping': 'true',
# Whether or not you want to highlight matching braces
'matchBrackets': 'true',
# Whether or not you want tags to automatically close themselves
'autoCloseTags': 'false',
# Whether or not you want Brackets to automatically close themselves
'autoCloseBrackets': 'false',
# Whether or not to enable search tools, CTRL+F (Find), CTRL+SHIFT+F (Replace), CTRL+SHIFT+R (Replace All), CTRL+G (Find Next), CTRL+SHIFT+G (Find Previous)
'enableSearchTools': 'true',
# Whether or not you wish to enable code folding (requires 'lineNumbers' to be set to 'true')
'enableCodeFolding': 'true',
# Whether or not to enable code formatting
'enableCodeFormatting': 'false',
# Whether or not to automatically format code should be done when the editor is loaded
'autoFormatOnStart': 'false',
# Whether or not to automatically format code should be done every time the source view is opened
'autoFormatOnModeChange': 'false',
# Whether or not to automatically format code which has just been uncommented
'autoFormatOnUncomment': 'false',
# Define the language specific mode 'htmlmixed' for html including (css, xml, javascript), 'application/x-httpd-php' for php mode including html, or 'text/javascript' for using java script only
'mode': 'htmlmixed',
# Whether or not to show the search Code button on the toolbar
'showSearchButton': 'true',
# Whether or not to show Trailing Spaces
'showTrailingSpace': 'true',
# Whether or not to highlight all matches of current word/selection
'highlightMatches': 'true',
# Whether or not to show the format button on the toolbar
'showFormatButton': 'true',
# Whether or not to show the comment button on the toolbar
'showCommentButton': 'true',
# Whether or not to show the uncomment button on the toolbar
'showUncommentButton': 'true',
#Whether or not to show the showAutoCompleteButton button on the toolbar
'showAutoCompleteButton': 'true',
# Whether or not to highlight the currently active line
'styleActiveLine': 'true'
},
'disallowedContent':{
'p h1 h2 h3 h4 span blockquote':{
#Disallow setting font-family or font-size
'styles':['font*'],
},
},
'allowedContent':{
'*': {
'attributes': ['id', 'itemprop', 'title', 'placeholder', 'type', 'data-*'],
'classes':['text-center', 'text-left', 'text-right', 'text-justify', 'center-text', 'text-muted',
'align-center', 'pull-left', 'pull-right', 'center-block', 'media', 'image',
'list-unstyled', 'list-inline',
'language-*', '*',
],
},
'p': {
'attributes': ['id'],
},
'h1 h2 h3 h4 em i b strong caption h5 h6 u s br hr': 'true',
'a': {
'attributes': ['!href','target','name', 'id', 'name', 'rel'],
},
'img':{
#Do not allow image height and width styles
'attributes': ['!src', 'alt', 'id'],
},
'span ul ol li sup sub': 'true',
'div':{
'classes':'*',
},
'iframe':{
'classes':'*',
'attributes':'*',
},
'small abbr address footer section article dl dt dd kbd var samp form label input button textarea fieldset':'true',
'pre':{
'attributes': ['title'],
'classes':['*']
},
'code': 'true',
'blockquote':'true',
'table':'true',
'tr':'true',
'th':'true',
'td':'true',
},
'justifyClasses': ['text-left', 'text-center', 'text-right', 'text-justify'],
'extraPlugins': 'button,toolbar,codesnippet,about,stylescombo,richcombo,floatpanel,panel,button,listblock,dialog,dialogui,htmlwriter,removeformat,horizontalrule,widget,lineutils,mathjax,div,fakeobjects,iframe,image2,justify,blockquote,indent,indentlist,indentblock',
'ignoreEmptyParagraph': 'true',
'coreStyles_bold': {
'element': 'b',
'overrides': 'strong',
},
'coreStyles_italic':{
'element':'i',
'overrides':'em',
},
#'fillEmptyBlocks':'false',#Might need a callback fn
'image2_alignClasses':['pull-left','text-center','pull-right'],
'mathJaxClass':'math-tex',
'mathJaxLib':STATIC_URL+'js/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML',
'tabSpaces':'4',
'indentClasses': ['col-xs-offset-1', 'col-xs-offset-2', 'col-xs-offset-3', 'col-xs-offset-4'],
},
}
CKEDITOR_RESTRICT_BY_USER = True
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.TemplateHTMLRenderer',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
}
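# With LimitOffsetPagination, list endpoints accept limit/offset query
# parameters, e.g. (hypothetical endpoint):
#     GET /api/posts/?limit=5&offset=10
# and respond with "count", "next" and "previous" keys alongside the
# "results" page.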
META_SITE_PROTOCOL = 'https'
# META_SITE_DOMAIN = 'pirateLearner.com'  # not needed when META_USE_SITES is enabled (domain comes from the sites framework)
META_SITE_TYPE = 'article' # override when passed in __init__
META_SITE_NAME = 'Aesthetic Blasphemy'
#META_INCLUDE_KEYWORDS = [] # keyword will be included in every article
#META_DEFAULT_KEYWORDS = [] # default when no keyword is provided in __init__
#META_IMAGE_URL = '' # Use STATIC_URL
META_USE_OG_PROPERTIES = True
META_USE_TWITTER_PROPERTIES = True
META_USE_GOOGLEPLUS_PROPERTIES = True
META_USE_SITES = True
META_PUBLISHER_FB_ID = 'https://www.facebook.com/PirateLearner' # can use the page URL or the publisher ID
META_PUBLISHER_GOOGLE_ID = 'https://plus.google.com/116465481265465787624' # Google+ ID
META_FB_APP_ID = ''
#blogging app settings
BLOGGING_MAX_ENTRY_PER_PAGE = 5
BLOGGING_CSS_FRAMEWORK = 'bootstrap4'
BLOGGING_USE_REVERSION = False
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '[%(asctime)-12s] %(message)s',
'datefmt': '%b %d %H:%M:%S'
},
'simple': {
'format': '%(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
'console': {
'class': 'logging.StreamHandler',
'stream': sys.stderr,
},
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
#'filename': STATIC_ROOT+'/logging/log.txt',
'filename': '/home/craft/projects/aestheticblasphemy/aestheticBlasphemy/logs/abLog.txt',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
'PirateLearner': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
}
| aestheticblasphemy/aestheticBlasphemy | aestheticBlasphemy/settings.py | Python | gpl-3.0 | 24,621 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Some code to compute the abundance matching.'''
import pyfits
import numpy as np
import matplotlib.pyplot as plt
import scipy
from astropy.cosmology import Planck15 as cosmo
#exec(open('TestCosmos2015.py').read())
def comov_volume(omega_sample, zmin, zmax):
"Compute the comoving volume between two redshifts in a solid angle."
V = omega_sample/41253.0*(cosmo.comoving_volume(zmax) - cosmo.comoving_volume(zmin))
return V
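# Example (assuming astropy's Planck15; exact numbers vary by version):
#     V = comov_volume(1.2, 0.2, 0.5)   # 1.2 deg^2 between z=0.2 and z=0.5
#     V.value                           # comoving volume in Mpc^3
# 41253.0 is the full-sky solid angle in square degrees, so omega_sample
# must be given in deg^2.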
""" For Laigle Catalog"""
"""Redshift selection"""
"""
zmin = 0.3
zmax = 0.7
zbin = galdata[galdata['PHOTOZ']>zmin]
zbin = zbin[zbin['PHOTOZ']<zmax]
omega_sample = 1.2
V = comov_volume(omega_sample, zmin, zmax)
#Compute abundances
n = 100 #number of mass bins for our graph
mmingal = zbin['MASS_MED'].min()
mmaxgal = zbin['MASS_MED'].max()
step = (mmaxgal-mmingal)/n #resolution
h = cosmo.h
N = np.empty(n)
for i in range(n):
"Compute the number of galaxies more massive than m for each mass bin"
N[i] = np.sum(zbin['MASS_MED']>(mmingal+step*i)) / (V*h*h*h)
"""
"""For Jean Coupon Catalog """
zmin = 0.2
zmax = 0.5
zbin = tdata[tdata['photo_z']>zmin]
zbin = zbin[zbin['photo_z']<=zmax]
n = 500 #number of mass bins for our graph
mmingal = zbin['mstar_cosmo'].min()
mmaxgal = zbin['mstar_cosmo'].max()
step = (mmaxgal-mmingal)/n #resolution
omega_sample = 1.2
V = comov_volume(omega_sample, zmin, zmax)
# Compute density for a linear scale
Ngal = np.empty(n)
for i in range(n):
"Compute the number of galaxies more massive than m for each mass bin"
Ngal[i] = np.sum(zbin['mstar_cosmo']>(mmingal+step*i)) / V.value
#Compute density for a log scale
Ngallog = np.empty(n)
massbinlog = np.logspace(mmingal, mmaxgal, num=n)
for i in range(n):
"Compute the number of galaxies more massive than m for each mass bin"
Ngallog[i] = np.sum(zbin['mstar_cosmo']>np.log10(massbinlog[i])) / V.value
#Plots
# plt.plot(np.linspace(mmingal, mmaxgal, num=n),Ngal)
# plt.ylabel('N(>m), $Mpc^{-3}$')
# plt.xlabel('Stellar Mass, $log( M_{*} / M_{\odot})$')
# plt.title('Abundance for Jean Coupon Catalog')
# plt.show()
# plt.loglog(massbinlog, Ngallog)
# plt.ylabel('log( N(>m) $Mpc^{-3})$')
# plt.xlabel('$log( M_{*} / M_{\odot})$')
# plt.title('Abundance for Jean Coupon Catalog')
# plt.show()
| Gorbagzog/StageIAP | AbundanceGalaxies.py | Python | gpl-3.0 | 2,309 |
#! /usr/bin/env python
# dnmapR_client.py is a revised and updated version of dnmap_client.py
# GPL v3
# Opsdisk LLC | opsdisk.com
# dnmap version modified: .6
# http://sourceforge.net/projects/dnmap
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia eldraco@gmail.com
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import argparse
import os
import random
import shlex
import sys
import time
from subprocess import Popen
from subprocess import PIPE
# openssl check
try:
from OpenSSL import SSL
except ImportError:
print '[-] Python openssl library required: apt-get install python-openssl'
exit(-1)
# twisted check
try:
from twisted.internet.protocol import ClientFactory, ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import ssl, reactor
except ImportError:
print '[-] Python twisted library required: apt-get install python-twisted'
exit(-1)
vernum = '1.0'
def check_clean(line):
global debug
try:
outboundChars = [';', '#', '`']
ret = True
for char in outboundChars:
if char in line:
ret = False
return ret
except Exception as inst:
print '[-] Problem in check_clean function'
print type(inst)
print inst.args
print inst
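# Hypothetical examples of the filtering above:
#     check_clean('nmap -sS 10.0.0.1')           -> True
#     check_clean('nmap -sS 10.0.0.1; rm -rf /')  -> False  (';' is rejected)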
class NmapClient(LineReceiver):
def connectionMade(self):
global clientID
global alias
global debug
print '[+] Client connected successfully...waiting for more commands.'
if debug:
print '[+] Your client ID is: {0}, and your alias is: {1}'.format(str(clientID), str(alias))
euid = os.geteuid()
# Do not send the euid, just tell if we are root or not.
if euid == 0:
# True
iamroot = 1
else:
# False
iamroot = 0
# 'Client ID' text must be sent to receive another command
line = 'Starts the Client ID:{0}:Alias:{1}:Version:{2}:ImRoot:{3}'.format(str(clientID), str(alias), vernum, iamroot)
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
#line = 'Send more commands to Client ID:{0}:Alias:{1}:\0'.format(str(clientID), str(alias))
line = 'Send more commands'
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
def dataReceived(self, line):
global debug
global clientID
global alias
# If a wait is received, just wait.
if 'Wait' in line:
sleeptime = int(line.split(':')[1])
time.sleep(sleeptime)
# Ask for more
#line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(clientID),str(alias))
line = 'Send more commands'
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
else:
# dataReceived does not wait for end of lines, CR, or LF
if debug:
print "\tCommand Received: {0}".format(line.strip('\n').strip('\r'))
# A little bit of protection from the server
if check_clean(line):
# Store the nmap output file so we can send it to the server later
try:
nmapOutputFile = line.split('-oA ')[1].split(' ')[0].strip(' ')
except IndexError:
randomFileName = str(random.randrange(0, 100000000, 1))
print '[-] No -oA argument, adding it to keep results. Added -oA ' + randomFileName
line = line + ' -oA ' + randomFileName
nmapOutputFile = line.split('-oA ')[1].split(' ')[0].strip(' ')
try:
nameReturnCode = -1
# Check for rate commands
# Verify that the server is NOT trying to force us to be faster. NMAP PARAMETER DEPENDENCE
if 'min-rate' in line:
tempVect = shlex.split(line)
wordIndex = tempVect.index('--min-rate')
# Just delete the --min-rate parameter with its value
nmapCommand = tempVect[0:wordIndex] + tempVect[wordIndex + 1:]
else:
nmapCommand = shlex.split(line)
# Do we have to add a max-rate parameter?
'''if maxRate:
nmapCommand.append('--max-rate')
nmapCommand.append(str((maxRate)))'''
# Strip the command, so we can control that only nmap is really executed
nmapCommand = nmapCommand[1:]
nmapCommand.insert(0, 'nmap')
# Recreate the final command and display it
nmapCommandString = ''
for i in nmapCommand:
nmapCommandString = nmapCommandString + i + ' '
print "\tExecuted command: {0}".format(nmapCommandString)
nmapProcess = Popen(nmapCommand, stdout=PIPE)
rawNmapOutput = nmapProcess.communicate()[0]
nameReturnCode = nmapProcess.returncode
except OSError:
print '[-] nmap is not installed: apt-get install nmap'
exit(-1)
except ValueError:
rawNmapOutput = '[-] Invalid nmap arguments.'
print rawNmapOutput
except Exception as inst:
print '[-] Problem in dataReceived function'
print type(inst)
print inst.args
print inst
if nameReturnCode >= 0:
# nmap ended ok
# Tell the server that we are sending the nmap output
print '\tSending output to the server...'
#line = 'Nmap Output File:{0}:{1}:{2}:'.format(nmapOutputFile.strip('\n').strip('\r'),str(clientID),str(alias))
line = 'nmap output file:{0}:'.format(nmapOutputFile.strip('\n').strip('\r'))
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
self.sendLine(rawNmapOutput)
#line = 'Nmap Output Finished:{0}:{1}:{2}:'.format(nmapOutputFile.strip('\n').strip('\r'),str(clientID),str(alias))
line = 'nmap output finished:{0}:'.format(nmapOutputFile.strip('\n').strip('\r'))
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
# Move nmap output files to their directory
os.system('mv *.nmap nmap_output > /dev/null 2>&1')
os.system('mv *.gnmap nmap_output > /dev/null 2>&1')
os.system('mv *.xml nmap_output > /dev/null 2>&1')
# Ask for another command.
# 'Client ID' text must be sent to receive another command
print '[*] Waiting for more commands...'
#line = 'Send more commands to Client ID:{0}:Alias:{1}:'.format(str(clientID),str(alias))
line = 'Send more commands'
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
else:
# Unknown command sent to client
print '[!] Unknown command sent to this client: {0}'.format(line)
line = 'Send more commands'
if debug:
print '[*] Line sent: {0}'.format(line)
self.sendLine(line)
class NmapClientFactory(ReconnectingClientFactory):
try:
protocol = NmapClient
def startedConnecting(self, connector):
print '[+] Attempting connection to server'
def clientConnectionFailed(self, connector, reason):
print '[-] Connection failed: ', reason.getErrorMessage()
# Try to reconnect
print '[*] Trying to reconnect. Please wait...'
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def clientConnectionLost(self, connector, reason):
print '[-] Connection lost. Reason: {0}'.format(reason.getErrorMessage())
# Try to reconnect
print '[*] Trying to reconnect in 10 secs. Please wait...'
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
except Exception as inst:
print '[-] Problem in NmapClientFactory'
print type(inst)
print inst.args
print inst
def process_commands():
global serverIP
global serverPort
global clientID
global factory
try:
print '[+] Client started...'
# Generate a unique client ID
clientID = str(random.randrange(0, 100000000, 1))
# Create the output directory
print '[*] Nmap output files stored in \'nmap_output\' directory...'
if not os.path.exists('./nmap_output'):
os.system('mkdir nmap_output > /dev/null 2>&1')
factory = NmapClientFactory()
# Do not wait more than 10 seconds between re-connections
factory.maxDelay = 10
reactor.connectSSL(str(serverIP), int(serverPort), factory, ssl.ClientContextFactory())
reactor.run()
except Exception as inst:
print '[-] Problem in process_commands function'
print type(inst)
print inst.args
print inst
def network_port_type(data):
if int(data) >= 0 and int(data) <= 65535:
return int(data)
else:
raise argparse.ArgumentTypeError("{} is not a valid TCP port".format(data))
def main():
global serverIP
global serverPort
global alias
global debug
#global maxRate
parser = argparse.ArgumentParser(description='dnmapR_client version ' + vernum)
parser.add_argument('-s', dest='serverip', action='store', default='127.0.0.1', help='Server IP to connect to. Default is 127.0.0.1')
parser.add_argument('-p', dest='serverport', action='store', default=46001, type=network_port_type, help='Server port to connect to. Default is 46001.')
parser.add_argument('-a', dest='alias', action='store', default='anonymous', help='Alias for this client.')
#parser.add_argument('-m', dest='maxrate', action='store', default=10000, help='Force dnmapR_client to use at most this rate. Useful to slow nmap down. Adds the --max-rate parameter.')
parser.add_argument('-d', dest='debug', action='store_true', default=False, help='Debugging')
args = parser.parse_args()
serverIP = args.serverip
serverPort = args.serverport
alias = args.alias.strip('\n').strip('\r').strip(' ')
#maxRate = args.maxrate
debug = args.debug
try:
# Start connecting
process_commands()
except KeyboardInterrupt:
# Handle CTRL-C interrupt.
print "[!] Keyboard interrupt detected...exiting."
sys.exit(1)
if __name__ == '__main__':
main()
| opsdisk/dnmapR | dnmapR_client.py | Python | gpl-3.0 | 12,638 |
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file Checkers.py
# checkers for unittests
#
import os
import random
import binascii
import time
import numpy
import sys
from nxswriter import Types
from math import exp
if sys.version_info > (3,):
unicode = str
long = int
else:
bytes = str
# checks for scalar attributes
class Checker(object):
# constructor
# \param testCase TestCase instance
def __init__(self, testCase):
# test case
self._tc = testCase
try:
# random seed
self.seed = long(binascii.hexlify(os.urandom(16)), 16)
except NotImplementedError:
self.seed = long(time.time() * 256) # use fractional seconds
# self.seed = 113927724434234094860192629108901591122
self.__rnd = random.Random(self.seed)
# checks field tree
# \param f pninx file object
# \param fname file name
# \param children number of detector children
# \returns detector group object
def checkFieldTree(self, f, fname, children):
# self._tc.assertEqual("%s/%s" % ( os.getcwd(), f.name), fname)
f = f.root()
self._tc.assertEqual(5, len(f.attributes))
self._tc.assertEqual(f.attributes["file_name"][...], fname)
# f = f.root()
self._tc.assertTrue(f.attributes["NX_class"][...], "NXroot")
self._tc.assertEqual(f.size, 2)
en = f.open("entry1")
self._tc.assertTrue(en.is_valid)
self._tc.assertEqual(en.name, "entry1")
self._tc.assertEqual(len(en.attributes), 1)
self._tc.assertEqual(en.size, 1)
at = en.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXentry")
ins = en.open("instrument")
self._tc.assertTrue(ins.is_valid)
self._tc.assertEqual(ins.name, "instrument")
self._tc.assertEqual(len(ins.attributes), 1)
self._tc.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXinstrument")
det = ins.open("detector")
self._tc.assertTrue(det.is_valid)
self._tc.assertEqual(det.name, "detector")
self._tc.assertEqual(len(det.attributes), 1)
self._tc.assertEqual(det.size, children)
at = det.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXdetector")
return det
# checks attribute tree
# \param f pninx file object
# \param fname file name
# \param gattributes number of group attributes
# \param fattributes number of field attributes
# \returns detector group object
def checkAttributeTree(self, f, fname, gattributes, fattributes):
# self._tc.assertEqual("%s/%s" % ( os.getcwd(), f.name), fname)
f = f.root()
self._tc.assertEqual(5, len(f.attributes))
self._tc.assertEqual(f.attributes["file_name"][...], fname)
self._tc.assertTrue(f.attributes["NX_class"][...], "NXroot")
self._tc.assertEqual(f.size, 2)
en = f.open("entry1")
self._tc.assertTrue(en.is_valid)
self._tc.assertEqual(en.name, "entry1")
self._tc.assertEqual(len(en.attributes), 1)
self._tc.assertEqual(en.size, 1)
at = en.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXentry")
ins = en.open("instrument")
self._tc.assertTrue(ins.is_valid)
self._tc.assertEqual(ins.name, "instrument")
self._tc.assertEqual(len(ins.attributes), 1)
self._tc.assertEqual(ins.size, 2)
at = ins.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXinstrument")
det = ins.open("detector")
self._tc.assertTrue(det.is_valid)
self._tc.assertEqual(det.name, "detector")
self._tc.assertEqual(len(det.attributes), 1 + gattributes)
self._tc.assertEqual(det.size, 0)
det = ins.open("detector")
self._tc.assertTrue(det.is_valid)
self._tc.assertEqual(det.name, "detector")
self._tc.assertEqual(len(det.attributes), 1 + gattributes)
self._tc.assertEqual(det.size, 0)
field = ins.open("counter")
self._tc.assertTrue(field.is_valid)
self._tc.assertEqual(field.name, "counter")
self._tc.assertEqual(len(field.attributes), 1 + fattributes)
at = det.attributes["NX_class"]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, "NX_class")
self._tc.assertEqual(at[...], "NXdetector")
return det, field
# checks scalar attribute
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param values original values
# \param error data precision
def checkScalarAttribute(self, det, name, dtype, values, error=0):
cnt = det.attributes[name]
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
# print name, "SHAPE", cnt.shape,len(cnt.shape)
try:
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (1,))
except Exception:
self._tc.assertEqual(len(cnt.shape), 0)
self._tc.assertEqual(cnt.dtype, dtype)
# pninx does not support reading string areas
if not isinstance(values, str) and not isinstance(values, unicode):
value = cnt[...]
if self._isNumeric(value):
# print "Val", name , values ,value
try:
self._tc.assertTrue(abs(values - value) <= error)
except Exception:
self._tc.assertEqual(values, value)
else:
self._tc.assertEqual(values, value)
if not isinstance(cnt[...], numpy.string_) and \
self._isNumeric(cnt[...]) and \
not (isinstance(cnt[...], numpy.ndarray) and
(str(cnt[...].dtype).startswith("|S"))):
if hasattr(cnt[...], "dtype") and str(cnt[...].dtype) == "object":
self._tc.assertEqual(values, cnt[...])
elif not self._isNumeric(values):
self._tc.assertEqual(Types.Converters.toBool(values), cnt[...])
else:
try:
self._tc.assertTrue(abs(values - cnt[...]) <= error)
except Exception:
self._tc.assertEqual(values, cnt[...])
elif isinstance(cnt[...], numpy.bool_):
self._tc.assertEqual(Types.Converters.toBool(values), cnt[...])
elif self._isNumeric(values):
self._tc.assertTrue(abs(values - cnt[...]) <= error)
else:
self._tc.assertEqual(values, cnt[...])
# checks spectrum attribute
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param values original values
# \param error data precision
def checkSpectrumAttribute(self, det, name, dtype, values, error=0):
cnt = det.attributes[name]
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
# pninx does not support reading string areas
# print "values", values, type(values)
# print "cnt", cnt[...], type(cnt[...])
for i in range(len(values)):
if isinstance(cnt[...], str) or isinstance(cnt[...], unicode):
self._tc.assertEqual(values[i], cnt[...])
elif dtype != "string" and self._isNumeric(cnt[i]) and not (
isinstance(cnt[...], numpy.ndarray) and
str(cnt[...].dtype) == 'object'):
if dtype == "bool":
self._tc.assertEqual(
Types.Converters.toBool(values[i]), cnt[i])
else:
# print "CMP",name, cnt[i] , values[i] ,
# cnt[i] - values[i] , error
self._tc.assertTrue(abs(cnt[i] - values[i]) <= error)
else:
self._tc.assertEqual(values[i], cnt[i])
# checks image attribute
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param values original values
# \param error data precision
def checkImageAttribute(self, det, name, dtype, values, error=0):
cnt = det.attributes[name]
# print name, cnt, cnt.dtype
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
if str(cnt.dtype) == "string" and cnt.shape == (1,):
self._tc.assertEqual(values[0][0], cnt[...])
self._tc.assertEqual((1, 1), (len(values), len(values[0])))
self._tc.assertEqual(cnt.dtype, dtype)
else:
self._tc.assertEqual(len(cnt.shape), 2)
self._tc.assertEqual(cnt.shape, (len(values), len(values[0])))
self._tc.assertEqual(cnt.dtype, dtype)
# pninx does not support reading string areas
for i in range(len(values)):
for j in range(len(values[i])):
# print i, j, cnt[i,j], values[i][j]
if dtype != "string" and self._isNumeric(cnt[i, 0]):
if dtype == "bool":
self._tc.assertEqual(
Types.Converters.toBool(values[i][j]),
cnt[i, j])
else:
self._tc.assertTrue(
abs(cnt[i, j] - values[i][j]) <= error)
else:
self._tc.assertEqual(values[i][j], cnt[i, j])
# checks if instance is numeric
# \param instance instance to check
# \returns True if the instance is numeric
def _isNumeric(self, instance):
if sys.version_info > (3,):
attrs = ['__pow__', '__mul__', '__floordiv__',
'__truediv__', '__add__', '__sub__']
else:
attrs = ['__pow__', '__mul__', '__div__', '__add__', '__sub__']
return all(hasattr(instance, attr) for attr in attrs)
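# _isNumeric duck-types on the arithmetic dunder methods, so for example
# _isNumeric(3), _isNumeric(3.5) and _isNumeric(numpy.int32(3)) are True,
# while _isNumeric("3") is False (str lacks __sub__ and __pow__).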
# creates spectrum plot with random Gaussians
# \param xlen data length
# \param nrGauss number of Gaussians
# \returns list with the plot
def nicePlot(self, xlen=2048, nrGauss=5):
pr = [[self.__rnd.uniform(0.01, 0.001),
self.__rnd.uniform(0, xlen),
self.__rnd.uniform(0.0, 1.)]
for i in range(nrGauss)]
return [sum([pr[j][2] * exp(-pr[j][0] * (i - pr[j][1]) ** 2)
for j in range(len(pr))])
for i in range(xlen)]
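# For example, nicePlot(xlen=8, nrGauss=2) returns a list of 8 floats
# sampling the sum of two randomly parametrized Gaussians; runs are
# reproducible through self.seed.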
# creates image plot with random Gaussians
# \param xlen data x-length
# \param ylen data y-length
# \param nrGauss number of Gaussians
# \returns list with the plot
def nicePlot2D(self, xlen=1024, ylen=1024, nrGauss=5):
pr = [[self.__rnd.uniform(0.1, 0.01),
self.__rnd.uniform(0.01, 0.1), self.__rnd.uniform(0, ylen),
self.__rnd.uniform(0, xlen), self.__rnd.uniform(0.0, 1.)]
for i in range(nrGauss)]
return [[sum([pr[j][4] *
exp(-pr[j][0] * (i1 - pr[j][2]) ** 2 - pr[j][1] *
(i2 - pr[j][3]) ** 2)
for j in range(len(pr))])
for i1 in range(ylen)] for i2 in range(xlen)]
# checks scalar counter
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkScalarField(self, det, name, dtype, nxtype, values, error=0,
attrs=None):
atts = {"type": nxtype, "units": "m",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
# print("ONE")
# print(cnt.name)
# print(values)
# print(type(values[0]))
# print("FILE")
# print(cnt[...])
# print(type(cnt[0]))
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
# pninx does not support reading string areas
if (
not isinstance(values[0], str) and
not isinstance(values[0], unicode) and
not isinstance(values[0], bytes)
):
value = cnt.read()
for i in range(len(value)):
if self._isNumeric(value[i]):
try:
self._tc.assertTrue(abs(value[i] - values[i]) <= error)
except Exception:
self._tc.assertEqual(values[i], value[i])
else:
self._tc.assertEqual(values[i], value[i])
for i in range(len(values)):
cv = cnt[i]
# if self._isNumeric(cnt[i]):
if self._isNumeric(cv):
if nxtype == "NX_BOOLEAN":
# print "BOOL: ", values[i] ,cnt[i]
self._tc.assertEqual(
Types.Converters.toBool(values[i]), cnt[i])
else:
self._tc.assertTrue(abs(values[i] - cnt[i]) <= error)
else:
# print("ONE")
# print(values)
# print(values[i])
# print(type(values[i]))
# print("FILE")
# print(cnt)
# print(cnt[i])
# print(type(cnt[i]))
self._tc.assertEqual(values[i], cnt[i])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks single scalar counter
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkSingleScalarField(self, det, name, dtype, nxtype, values,
error=0,
attrs=None):
atts = {"type": nxtype, "units": "m",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (1,))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, 1)
# pninx does not support reading string areas
if not isinstance(values, str) and not isinstance(values, unicode):
value = cnt.read()
if self._isNumeric(value):
try:
self._tc.assertTrue(abs(values - value) <= error)
except Exception:
self._tc.assertEqual(values, value)
else:
self._tc.assertEqual(values, value)
if self._isNumeric(cnt.read()) and not (
isinstance(cnt[...], numpy.ndarray) and
(str(cnt[...].dtype) in ['object'] or
str(cnt[...].dtype).startswith("<U"))):
if not self._isNumeric(values):
# print "BOOL: ", values[i] ,cnt[i]
self._tc.assertEqual(
Types.Converters.toBool(values), cnt.read())
else:
try:
self._tc.assertTrue(abs(values - cnt.read()) <= error)
except Exception:
self._tc.assertEqual(values, cnt.read())
else:
self._tc.assertEqual(values, cnt.read())
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks single string scalar counter
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkSingleStringScalarField(self, det, name, dtype, nxtype, values,
error=0,
attrs=None):
atts = {"type": nxtype, "units": "m",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 0)
self._tc.assertEqual(cnt.shape, ())
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, 1)
# pninx does not support reading string areas
if not isinstance(values, str) and not isinstance(values, unicode):
value = cnt.read()
if self._isNumeric(value):
try:
self._tc.assertTrue(abs(values - value) <= error)
except Exception:
self._tc.assertEqual(values, value)
else:
self._tc.assertEqual(values, value)
if self._isNumeric(cnt.read()) and not (
isinstance(cnt[...], numpy.ndarray) and
(str(cnt[...].dtype) in ['object'] or
str(cnt[...].dtype).startswith("<U"))):
if not self._isNumeric(values):
# print "BOOL: ", values[i] ,cnt[i]
self._tc.assertEqual(
Types.Converters.toBool(values), cnt.read())
else:
try:
self._tc.assertTrue(abs(values - cnt.read()) <= error)
except Exception:
self._tc.assertEqual(values, cnt.read())
else:
self._tc.assertEqual(values, cnt.read())
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks post scalar counter
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkPostScalarField(self, det, name, dtype, nxtype, values,
error=0, attrs=None):
atts = {"type": nxtype, "units": "m",
"postrun": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (0,))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, 0)
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks XML scalar counter
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkXMLScalarField(self, det, name, dtype, nxtype, values,
error=0, attrs=None):
atts = {"type": nxtype, "units": "m", "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (1,))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, 1)
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.shape, ())
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
if not isinstance(values, str) and not isinstance(values, unicode):
value = cnt.read()
if self._isNumeric(value):
try:
self._tc.assertTrue(abs(values - value) <= error)
except Exception:
self._tc.assertEqual(values, value)
else:
self._tc.assertEqual(values, value)
if self._isNumeric(cnt.read()) and not (
isinstance(cnt[...], numpy.ndarray) and
(str(cnt[...].dtype) in ['object'] or
str(cnt[...].dtype).startswith("<U"))):
if not self._isNumeric(values):
self._tc.assertEqual(
Types.Converters.toBool(values), cnt.read())
else:
try:
self._tc.assertTrue(abs(values - cnt.read()) <= error)
except Exception:
self._tc.assertEqual(values, cnt.read())
else:
self._tc.assertEqual(values, cnt.read())
# checks XML string scalar counter
# \param det detector group
# \param name field name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param attrs dictionary with string attributes
def checkXMLStringScalarField(self, det, name, dtype, nxtype, values,
error=0, attrs=None):
atts = {"type": nxtype, "units": "m", "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 0)
self._tc.assertEqual(cnt.shape, ())
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, 1)
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.shape, ())
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
if not isinstance(values, str) and not isinstance(values, unicode):
value = cnt.read()
if self._isNumeric(value):
try:
self._tc.assertTrue(abs(values - value) <= error)
except Exception:
self._tc.assertEqual(values, value)
else:
self._tc.assertEqual(values, value)
if self._isNumeric(cnt.read()) and not (
isinstance(cnt[...], numpy.ndarray) and
(str(cnt[...].dtype) in ['object'] or
str(cnt[...].dtype).startswith("<U"))):
if not self._isNumeric(values):
self._tc.assertEqual(
Types.Converters.toBool(values), cnt.read())
else:
try:
self._tc.assertTrue(abs(values - cnt.read()) <= error)
except Exception:
self._tc.assertEqual(values, cnt.read())
else:
self._tc.assertEqual(values, cnt.read())
# checks spectrum field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkSpectrumField(self, det, name, dtype, nxtype, values, error=0,
grows=0, attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
if grows and grows > 1:
lvalues = list(map(lambda *row: list(row), *values))
else:
lvalues = values
self._tc.assertEqual(len(cnt.shape), 2)
self._tc.assertEqual(cnt.shape, (len(lvalues), len(lvalues[0])))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(lvalues) * len(lvalues[0]))
# pninx does not support reading string areas
for i in range(len(lvalues)):
for j in range(len(lvalues[i])):
# print i, j, cnt[i,j], lvalues[i][j]
if self._isNumeric(cnt[i, 0]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(lvalues[i][j]), cnt[i, j])
else:
self._tc.assertTrue(
abs(cnt[i, j] - lvalues[i][j]) <= error)
else:
self._tc.assertEqual(lvalues[i][j], cnt[i, j])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks single spectrum field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkSingleSpectrumField(self, det, name, dtype, nxtype, values,
error=0, grows=0, attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
# pninx does not support reading string areas
for i in range(len(values)):
# print i, cnt[i], type(cnt[i]), values[i]
if self._isNumeric(cnt[i]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(values[i]), cnt[i])
else:
self._tc.assertTrue(abs(values[i] - cnt[i]) <= error)
else:
self._tc.assertEqual(values[i], cnt[i])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks XML spectrum field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkXMLSpectrumField(self, det, name, dtype, nxtype, values,
error=0, grows=0, attrs=None):
atts = {"type": nxtype, "units": "", "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
# pninx does not support reading string areas
for i in range(len(values)):
if self._isNumeric(cnt[i]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(values[i]), cnt[i])
else:
self._tc.assertTrue(abs(values[i] - cnt[i]) <= error)
else:
self._tc.assertEqual(values[i], cnt[i])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.shape, ())
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks string spectrum field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param attrs dictionary with string attributes
def checkStringSpectrumField(self, det, name, dtype, nxtype, values,
attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnts = [det.open(name + "_" + str(sz)) for sz in range(len(values[0]))]
for sz in range(len(values[0])):
self._tc.assertEqual(cnts[sz].name, name + "_" + str(sz))
for cnt in cnts:
self._tc.assertTrue(cnt.is_valid)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.shape, ())
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
for i in range(len(values)):
for j in range(len(values[i])):
# print "CNT", cnt[j]
# print i, j, cnts[j][i]," " , values[i][j]
self._tc.assertEqual(values[i][j], cnts[j][i])
# checks single string spectrum field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param attrs dictionary with string attributes
def checkSingleStringSpectrumField(self, det, name, dtype, nxtype,
values, attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.shape, ())
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
for i in range(len(values)):
self._tc.assertEqual(values[i], cnt[i])
# checks image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkImageField(self, det, name, dtype, nxtype, values,
error=0, grows=0, attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
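# if the growing dimension is not the first one, transpose the original values so their layout matches the stored field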
if grows == 3:
lvalues = list(map(
lambda *image: list(map(
lambda *row: list(row), *image)), *values))
elif grows == 2:
lvalues = list(map(lambda *row: list(row), *values))
else:
lvalues = values
self._tc.assertEqual(len(cnt.shape), 3)
self._tc.assertEqual(
cnt.shape, (len(lvalues), len(lvalues[0]), len(lvalues[0][0])))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(
cnt.size, len(lvalues) * len(lvalues[0]) * len(lvalues[0][0]))
# pninx does not support reading string areas
for i in range(len(lvalues)):
for j in range(len(lvalues[i])):
for k in range(len(lvalues[i][j])):
# print i, j, cnt[i,j], lvalues[i][j]
if self._isNumeric(cnt[i, 0, 0]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(lvalues[i][j][k]),
cnt[i, j, k])
else:
self._tc.assertTrue(
abs(lvalues[i][j][k] - cnt[i, j, k]) <= error)
else:
self._tc.assertEqual(lvalues[i][j][k], cnt[i, j, k])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks single image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkSingleImageField(self, det, name, dtype, nxtype, values, error=0,
grows=0, attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 2)
self._tc.assertEqual(cnt.shape, (len(values), len(values[0])))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values) * len(values[0]))
# pninx does not support reading string areas
for i in range(len(values)):
for j in range(len(values[i])):
if self._isNumeric(cnt[i, 0]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(values[i][j]), cnt[i, j])
else:
self._tc.assertTrue(
abs(cnt[i, j] - values[i][j]) <= error)
else:
self._tc.assertEqual(values[i][j], cnt[i, j])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
try:
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
except Exception:
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks xml image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param error data precision
# \param grows growing dimension
# \param attrs dictionary with string attributes
def checkXMLImageField(self, det, name, dtype, nxtype, values,
error=0, grows=0, attrs=None):
atts = {"type": nxtype, "units": "", "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnt = det.open(name)
self._tc.assertTrue(cnt.is_valid)
self._tc.assertEqual(cnt.name, name)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 2)
self._tc.assertEqual(cnt.shape, (len(values), len(values[0])))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values) * len(values[0]))
# pninx does not support reading string areas
for i in range(len(values)):
for j in range(len(values[i])):
if self._isNumeric(cnt[i, 0]):
if nxtype == "NX_BOOLEAN":
self._tc.assertEqual(
Types.Converters.toBool(values[i][j]), cnt[i, j])
else:
self._tc.assertTrue(
abs(values[i][j] - cnt[i, j]) <= error)
else:
self._tc.assertEqual(values[i][j], cnt[i, j])
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 0)
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
# checks string image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param attrs dictionary with string attributes
def checkStringImageField(self, det, name, dtype, nxtype, values,
attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
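# string images are stored as a separate 1-D field per (row, column) pair, named <name>_<row>_<col>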
cnts = [[det.open(name + "_" + str(s1) + "_" + str(s2))
for s2 in range(len(values[0][0]))]
for s1 in range(len(values[0]))]
for s1 in range(len(values[0])):
for s2 in range(len(values[0][0])):
self._tc.assertEqual(
cnts[s1][s2].name, name + "_" + str(s1) + "_" + str(s2))
for icnt in cnts:
for cnt in icnt:
self._tc.assertTrue(cnt.is_valid)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
for i in range(len(values)):
for j in range(len(values[i])):
for k in range(len(values[i][j])):
self._tc.assertEqual(values[i][j][k], cnts[j][k][i])
# checks single string image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param attrs dictionary with string attributes
def checkSingleStringImageField(self, det, name, dtype, nxtype, values,
attrs=None):
atts = {"type": nxtype, "units": "",
"nexdatas_source": None, "nexdatas_strategy": None}
if attrs is not None:
atts = attrs
cnts = [det.open(name + "_" + str(s1))
for s1 in range(len(values[0]))]
for s1 in range(len(values[0])):
self._tc.assertEqual(cnts[s1].name, name + "_" + str(s1))
for cnt in cnts:
self._tc.assertTrue(cnt.is_valid)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
for i in range(len(values)):
for j in range(len(values[i])):
self._tc.assertEqual(values[i][j], cnts[j][i])
# checks XML string image field
# \param det detector group
# \param name counter name
# \param dtype numpy type
# \param nxtype nexus type
# \param values original values
# \param attrs dictionary with string attributes
def checkXMLStringImageField(self, det, name, dtype, nxtype, values,
attrs=None):
atts = {"type": nxtype, "units": ""}
if attrs is not None:
atts = attrs
cnts = [det.open(name + "_" + str(s1))
for s1 in range(len(values[0]))]
for s1 in range(len(values[0])):
self._tc.assertEqual(cnts[s1].name, name + "_" + str(s1))
for cnt in cnts:
self._tc.assertTrue(cnt.is_valid)
self._tc.assertTrue(hasattr(cnt.shape, "__iter__"))
self._tc.assertEqual(len(cnt.shape), 1)
self._tc.assertEqual(cnt.shape, (len(values),))
self._tc.assertEqual(cnt.dtype, dtype)
self._tc.assertEqual(cnt.size, len(values))
self._tc.assertEqual(len(cnt.attributes), len(atts))
for a in atts:
at = cnt.attributes[a]
self._tc.assertTrue(at.is_valid)
self._tc.assertTrue(hasattr(at.shape, "__iter__"))
self._tc.assertEqual(len(at.shape), 1)
self._tc.assertEqual(at.shape, (1,))
self._tc.assertEqual(at.dtype, "string")
self._tc.assertEqual(at.name, a)
if atts[a] is not None:
self._tc.assertEqual(at[...], atts[a])
for i in range(len(values)):
for j in range(len(values[i])):
self._tc.assertEqual(values[i][j], cnts[j][i])
|
nexdatas/writer
|
test/Checkers.py
|
Python
|
gpl-3.0
| 52,497
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from uthportal.tasks.course import CourseTask
class ce437(CourseTask):
document_prototype = {
"code": "ce437",
"announcements": {
"link_site": "",
"link_eclass": "http://eclass.uth.gr/eclass/modules/announcements/rss.php?c=MHX287"
},
"info": {
"name": u"Αλγόριθμοι CAD",
"code_site": u"HY437",
"code_eclass": "MHX287",
"link_site": "http://inf-server.inf.uth.gr/courses/CE437/",
"link_eclass": "http://eclass.uth.gr/eclass/courses/MHX287/"
}
}
|
kkanellis/uthportal-server
|
uthportal/library/inf/courses/ce437.py
|
Python
|
gpl-3.0
| 635
|
from django import template
from DNAOrderApp.order.models import AffiliatedInstitute
register = template.Library()
# DEPRECATED - NOT IN USE
# def get_affiliated_institute_given_ai_list(ailist):
# """Returns all affiliated institute for fmprojectlist"""
# print "this is ailist", ailist
# html_string = "<ul>"
# for ai in ailist:
# html_string += "<li>" + ai.ai_name + "</li>"
# html_string+="</ul>"
# return html_string
def get_all_affiliated_institute_for_tss4(QuerySet_Affiliated_Institute):
"""Returns all affiliated institute for tmp-sample-submission-4.html"""
ai_list = QuerySet_Affiliated_Institute.all()
html_string = "<ul>"
for ai in ai_list:
html_string += "<li>" + ai.ai_name + "</li>"
html_string+="</ul>"
print "inside get_all_affiliated_institute with queryset affiliated institute"
return html_string
def get_all_affiliated_institute(Queryset_Affiliated_Institute):
"""Returns all affiliated institute for typeahead function in fmprojectlist"""
ai_list = []
for c in Queryset_Affiliated_Institute:
print str(c.ai_name)
ai_list.append(str(c.ai_name))
print 'this is a list', ai_list
return "[\"" + "\",\"".join(ai_list) + "\"]"
def get_affiliated_institute_by_contact(dnaorderappuser):
"""Returns the affiliated institute associated to the DNAOrderAppUser"""
return AffiliatedInstitute.objects.filter(dnaorderappuser__username__exact=dnaorderappuser)
# register.filter('get_affiliated_institute_given_ai_list', get_affiliated_institute_given_ai_list)
register.filter('get_all_affiliated_institute_for_tss4', get_all_affiliated_institute_for_tss4)
register.filter('get_all_affiliated_institute', get_all_affiliated_institute)
register.filter('get_affiliated_institute_by_contact', get_affiliated_institute_by_contact)
|
aw18/DNAOrderApp
|
DNAOrderApp/order/templatetags/get_affiliated_institute_list.py
|
Python
|
gpl-3.0
| 1,767
|
from PySide.QtGui import QGridLayout, QWidget
import color
import koeppencolor
from sphereview import SphereView
def climatecolor(tile, _):
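# colour land tiles by their Koeppen class; grey for unclassified land, black for water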
h, c = tile.elevation, tile.climate
if c is None:
return (127,127,127) if h > 0 else (0,0,0)
k = c.koeppen
if h > 0:
rgb = koeppencolor.values[k[0]][k[1]]
else:
rgb = 0, 0, 0
return rgb
colorvalue = lambda t, _: color.value(t)
def population(tile, populated):
if tile.elevation > 0 and tile in populated:
return (192,192,192 - populated[tile] * 1.25)
return color.value(tile)
class WorldDisplay(QWidget):
_colorfunctions = [climatecolor, colorvalue, population]
def __init__(self, sim, selecthandler):
QWidget.__init__(self)
self._sim = sim
self._screen = None
self._rotate = 0
self._aspect = self._colorfunctions.index(colorvalue)
self._select = selecthandler
self.selected = None
self.setLayout(QGridLayout())
self.invalidate()
@property
def rotate(self):
return self._rotate
@rotate.setter
def rotate(self, value):
self._rotate = value
self._screen.rotate(self._rotate)
@property
def aspect(self):
return self._aspect
@aspect.setter
def aspect(self, value):
self._aspect = value
self.invalidate()
def tilecolor(self, tile, populated):
if tile is self.selected:
return (255,0,0)
return self._colorfunctions[self._aspect](tile, populated)
def select(self, x, y, z):
self.selected = self._sim.nearest((z,-x,y)) if abs(z) < 2 else None
self._select(self.selected)
def invalidate(self):
if self._screen is None:
self._screen = SphereView(self._sim.grid.faces, self)
self._screen.clicked.connect(self.select)
populated = {t: p for (t, (_, p)) in self._sim.populated.items()}
self._screen.usecolors({ v: self.tilecolor(t, populated) for (v, t) in self._sim.tiles.items() })
self._screen.rotate(self._rotate)
self.layout().addWidget(self._screen)
|
tps12/Tec-Nine
|
worlddisplay.py
|
Python
|
gpl-3.0
| 2,142
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author = lucas.wang
@create_time = 2018-01-29
"""
from Tkinter import Tk
class Clipboard(object):
def addToClipboard(self, string):
"""字符串添加到剪贴板"""
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(string)
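# update() lets Tk hand the clipboard data over before the root window is destroyed; without it the contents may be lost on some platforms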
r.update()
r.destroy()
def getClipboard(self):
"""返回剪贴板上的内容"""
r = Tk()
r.withdraw()
tmp = r.clipboard_get()
r.destroy()
return tmp
if __name__ == '__main__':
Clipboard().addToClipboard("alex lee")
Clipboard().addToClipboard("alex lee33333")
|
Lucas-Wong/ToolsProject
|
Clipboard/Clipboard.py
|
Python
|
gpl-3.0
| 672
|
from __future__ import absolute_import
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
# from djangui.admin import
from .views import (celery_status, CeleryTaskView, celery_task_command, DjanguiScriptJSON,
DjanguiHomeView, DjanguiRegister, djangui_login, DjanguiProfileView)
from . import settings as djangui_settings
djangui_patterns = [
url(r'^celery/command$', celery_task_command, name='celery_task_command'),
url(r'^celery/status$', celery_status, name='celery_results'),
url(r'^celery/(?P<job_id>[a-zA-Z0-9\-]+)/$', CeleryTaskView.as_view(), name='celery_results_info'),
# url(r'^admin/', include(djangui_admin.urls)),
url(r'^djscript/(?P<script_group>[a-zA-Z0-9\-\_]+)/(?P<script_name>[a-zA-Z0-9\-\_]+)/(?P<job_id>[a-zA-Z0-9\-]+)$',
DjanguiScriptJSON.as_view(), name='djangui_script_clone'),
url(r'^djscript/(?P<script_group>[a-zA-Z0-9\-\_]+)/(?P<script_name>[a-zA-Z0-9\-\_]+)/$', DjanguiScriptJSON.as_view(), name='djangui_script'),
url(r'^profile/$', DjanguiProfileView.as_view(), name='profile_home'),
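# the root URL is mapped twice below so the home view can be reversed under two names ('djangui_home' and 'djangui_task_launcher')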
url(r'^$', DjanguiHomeView.as_view(), name='djangui_home'),
url(r'^$', DjanguiHomeView.as_view(), name='djangui_task_launcher'),
url('^{}'.format(djangui_settings.DJANGUI_LOGIN_URL.lstrip('/')), djangui_login, name='djangui_login'),
url('^{}'.format(djangui_settings.DJANGUI_REGISTER_URL.lstrip('/')), DjanguiRegister.as_view(), name='djangui_register'),
]
urlpatterns = [
url('^', include(djangui_patterns, namespace='djangui')),
url('^', include('django.contrib.auth.urls')),
]
|
Chris7/django-djangui
|
djangui/urls.py
|
Python
|
gpl-3.0
| 1,653
|
"""This plugin adds a menu File->Messages->Export Locations to Editor
which opens an editor with the contents of the Locations view.
It also add contextual menu to clear items for current file from
location view.
"""
import GPS
import gps_utils
import pygps
import os.path
def message_compare(a, b):
""" Comparison function between two messages: compare messages based on
line, then column, then text.
"""
if a.get_line() != b.get_line():
return -1 if a.get_line() < b.get_line() else 1
if a.get_column() != b.get_column():
return -1 if a.get_column() < b.get_column() else 1
if a.get_text() < b.get_text():
return -1
return 1
def in_locations_filter(context):
return context.module_name == "Location_View_Record"
@gps_utils.interactive(
name="export locations to editor",
contextual="Export messages to editor",
filter=in_locations_filter,
after="Change Directory...")
def export_locations_to_editor():
"""
Export all messages listed in the Locations view to an editor.
"""
categories = {}
# Get all messages
msgs = GPS.Message.list()
# Filter them and organize them by category and file
for m in msgs:
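# flag '2' marks messages that are shown in the Locations view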
if m.get_flags() & 2:
file = m.get_file()
category = m.get_category()
if category in categories:
if file in categories[category]:
categories[category][file] += [m]
else:
categories[category][file] = [m]
else:
categories[category] = {file: [m]}
if not categories:
GPS.MDI.dialog("The Locations view is empty.")
return
# Construct a string that we will write in the editor
text = ""
categories_list = [c for c in categories]
categories_list.sort()
for c in categories_list:
text += c + "\n"
files_list = [f for f in categories[c]]
files_list.sort()
for f in files_list:
text += " %s\n" % f.path
messages = categories[c][f]
messages.sort(message_compare)
for m in messages:
text += " %s:%s %s\n" % (
m.get_line(),
m.get_column(),
m.get_text())
text += "\n"
# Open an editor
GPS.execute_action("new file")
buf = GPS.EditorBuffer.get()
# Write the contents
buf.insert(buf.at(1, 1), text)
def on_filter(context):
if context.file():
# Return True if there are any messages in the file context
# which have the flag '2' set to 1, meaning that they show up in
# the Locations view.
for m in GPS.Message.list(file=context.file()):
if m.get_flags() & 2:
return True
return False
def on_label(context):
return "Clear locations for <b>%s</b>" % (
os.path.basename(context.file().path))
@gps_utils.interactive(
category='Locations', filter=on_filter,
name='Clear locations for file',
contextual=on_label,
static_path="Clear locations")
def on_contextual():
context = GPS.current_context()
list = GPS.Message.list(file=context.file())
for m in list:
m.remove()
def in_call_trees_filter(context):
return context.module_name == "Callgraph_View"
@gps_utils.interactive(
name="export call trees to editor",
icon='gps-save-symbolic',
toolbar='Call Trees', button_label='Export to editor',
filter=in_call_trees_filter)
def export_call_trees_to_editor():
"""
Export the current contents of the Call Trees view to an editor.
"""
def dump_tree_model(model, indent):
values = []
if model:
for row in model:
first = row[0]
if first == 'computing...':
return []
if not row[1] or \
first.endswith(' called by ') or \
first.endswith('calls '):
values.append(indent + first)
else:
values.append(indent + first + '\t\t{}'.format(row[1]))
values.extend(
dump_tree_model(row.iterchildren(), indent + " "))
return values
m = pygps.get_widget_by_name("Call Graph Tree").get_model()
text = '\n'.join(dump_tree_model(m, ""))
# Open an editor and write the contents
GPS.execute_action("new file")
buf = GPS.EditorBuffer.get()
buf.delete() # in case some template was inserted
buf.insert(buf.at(1, 1), text)
|
qunying/gps
|
share/support/ui/locations_view_utils.py
|
Python
|
gpl-3.0
| 4,549
|
from gordon.io.audio import AudioFile
|
bmcfee/gordon
|
gordon/io/__init__.py
|
Python
|
gpl-3.0
| 39
|
version = 1.1
|
thomwiggers/besluiten
|
version.py
|
Python
|
gpl-3.0
| 14
|
#!/usr/bin/env python
# Written by Shlomi Fish, under the MIT Expat License.
import os
import os.path
import re
from sys import platform
IS_MAC = (platform == "darwin")
TEST_TAGS = os.getenv('TEST_TAGS', '')
def _has_tag(tag):
return re.search("\\b{}\\b".format(tag), TEST_TAGS)
PY_VERS = ([2] if _has_tag('WITH_PY2') else [])+[3]
SKIP_GTK = _has_tag('SKIP_GTK')
module_names = []
for d, _, files in os.walk("pysollib"):
for f in files:
if re.search("\\.py$", f):
module_names.append(
(d + "/" + re.sub("\\.py$", "", f))
.replace("/", ".").replace(os.sep, "."))
module_names.sort()
for module_name in module_names:
if "kivy" in module_name:
continue
is_gtk = ("gtk" in module_name)
for ver in PY_VERS:
if ((not is_gtk) or (ver == 2 and (not IS_MAC) and (not SKIP_GTK))):
def fmt(s):
return s % {'module_name': module_name, 'ver': ver}
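# emit one tiny self-contained test script per module and python version; each prints TAP-style output ('1..1' / 'ok 1 - imported')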
open(os.path.join(".", "tests", "individually-importing", fmt("import_v%(ver)d_%(module_name)s.py")), 'w').write(fmt('''#!/usr/bin/env python%(ver)d
import sys
print('1..1')
sys.path.insert(0, ".")
import %(module_name)s
print('ok 1 - imported')
'''))
|
shlomif/PySolFC
|
scripts/gen_individual_importing_tests.py
|
Python
|
gpl-3.0
| 1,222
|
#skip#
####################################################################################################
import matplotlib.pyplot as plt
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
# from OperationalAmplifier import basic_comparator
####################################################################################################
circuit = Circuit('Astable Multivibrator')
source = circuit.V('cc', 'vcc', circuit.gnd, 15@u_V)
# Time constant
circuit.R(1, 'output', 'comparator', 1@u_kΩ)
circuit.C(1, 'comparator', circuit.gnd, 100@u_nF)
# Reference
circuit.R(2, 'output', 'reference', 100@u_kΩ)
circuit.R(3, 'vcc', 'reference', 100@u_kΩ)
circuit.R(4, 'reference', circuit.gnd, 100@u_kΩ)
# Comparator
# Fixme: ngspice is buggy with such subcircuit
# circuit.subcircuit(basic_comparator)
# circuit.X('comparator', 'BasicComparator', 'reference', 'comparator', 'vcc', circuit.gnd, 'output')
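# model the comparator with a behavioural source instead: V(reference, comparator) is the differential input and the table switches the output from 0 V to Vcc as it crosses zero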
circuit.NonLinearVoltageSource(1, 'output', circuit.gnd,
expression='V(reference, comparator)',
table=((-micro(1), 0),
(micro(1), source.dc_value))
)
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
# simulator.initial_condition(comparator=0) # Fixme: simulator.nodes.comparator == 0
simulator.node_set(comparator=0) # Fixme: simulator.nodes.comparator == 0
analysis = simulator.transient(step_time=1@u_us, end_time=500@u_us)
# Fixme: Xyce fails with Time step too small
figure, ax = plt.subplots(figsize=(20, 10))
ax.grid()
ax.plot(analysis.reference)
ax.plot(analysis.comparator)
ax.plot(analysis.output)
plt.tight_layout()
plt.show()
#f# save_figure('figure', 'astable.png')
|
FabriceSalvaire/PySpice
|
examples/operational-amplifier/astable.py
|
Python
|
gpl-3.0
| 2,047
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint, nowdate
from frappe import throw, _
import frappe.defaults
from frappe.utils import getdate
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.utils import get_account_currency
from frappe.desk.notifications import clear_doctype_notifications
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class PurchaseReceipt(BuyingController):
def __init__(self, arg1, arg2=None):
super(PurchaseReceipt, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'purchase_order_item',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'purchase_order',
'overflow_type': 'receipt'
},
{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'purchase_order_item',
'target_field': 'returned_qty',
'target_parent_dt': 'Purchase Order',
# 'target_parent_field': 'per_received',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'overflow_type': 'receipt',
'extra_cond': """ and exists (select name from `tabPurchase Receipt` where name=`tabPurchase Receipt Item`.parent and is_return=1)"""
}]
def validate(self):
self.validate_posting_time()
super(PurchaseReceipt, self).validate()
if not self._action=="submit":
self.set_status()
self.po_required()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
pc_obj = frappe.get_doc('Purchase Common')
self.check_for_closed_status(pc_obj)
if getdate(self.posting_date) > getdate(nowdate()):
throw(_("Posting Date cannot be future date"))
def validate_with_previous_doc(self):
super(PurchaseReceipt, self).validate_with_previous_doc({
"Purchase Order": {
"ref_dn_field": "purchase_order",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "purchase_order_item",
"compare_fields": [["project", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True
}
})
if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([["Purchase Order", "purchase_order", "purchase_order_item"]])
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_order:
frappe.throw(_("Purchase Order number required for Item {0}").format(d.item_code))
def get_already_received_qty(self, po, po_detail):
qty = frappe.db.sql("""select sum(qty) from `tabPurchase Receipt Item`
where purchase_order_item = %s and docstatus = 1
and purchase_order=%s
and parent != %s""", (po_detail, po, self.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = frappe.db.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
# Check for Closed status
def check_for_closed_status(self, pc_obj):
check_list = []
for d in self.get('items'):
if d.meta.get_field('purchase_order') and d.purchase_order and d.purchase_order not in check_list:
check_list.append(d.purchase_order)
pc_obj.check_for_closed_status('Purchase Order', d.purchase_order)
# on submit
def on_submit(self):
purchase_controller = frappe.get_doc("Purchase Common")
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total)
# Set status as Submitted
frappe.db.set(self, 'status', 'Submitted')
self.update_prevdoc_status()
self.update_billing_status()
if not self.is_return:
purchase_controller.update_last_purchase_rate(self, 1)
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "items")
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(self.submit_rv[0][0]))
def on_cancel(self):
pc_obj = frappe.get_doc('Purchase Common')
self.check_for_closed_status(pc_obj)
# Check if Purchase Invoice has been submitted against current Purchase Order
submitted = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
self.name)
if submitted:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(submitted[0][0]))
frappe.db.set(self,'status','Cancelled')
self.update_prevdoc_status()
self.update_billing_status()
if not self.is_return:
pc_obj.update_last_purchase_rate(self, 0)
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
self.update_stock_ledger()
self.make_gl_entries_on_cancel()
def get_current_stock(self):
for d in self.get('supplied_items'):
if self.supplier_warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_rate(self,arg):
return frappe.get_doc('Purchase Common').get_rate(arg,self)
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import process_gl_map
stock_rbnb = self.get_company_default("stock_received_but_not_billed")
expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
gl_entries = []
warehouse_with_no_account = []
negative_expense_to_be_booked = 0.0
stock_items = self.get_stock_items()
for d in self.get("items"):
if d.item_code in stock_items and flt(d.valuation_rate) and flt(d.qty):
if warehouse_account.get(d.warehouse):
stock_value_diff = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Purchase Receipt", "voucher_no": self.name,
"voucher_detail_no": d.name}, "stock_value_difference")
if not stock_value_diff:
continue
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[d.warehouse]["name"],
"against": stock_rbnb,
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": stock_value_diff
}, warehouse_account[d.warehouse]["account_currency"]))
# stock received but not billed
stock_rbnb_currency = get_account_currency(stock_rbnb)
gl_entries.append(self.get_gl_dict({
"account": stock_rbnb,
"against": warehouse_account[d.warehouse]["name"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.base_net_amount, d.precision("base_net_amount")),
"credit_in_account_currency": flt(d.base_net_amount, d.precision("base_net_amount")) \
if stock_rbnb_currency==self.company_currency else flt(d.net_amount, d.precision("net_amount"))
}, stock_rbnb_currency))
negative_expense_to_be_booked += flt(d.item_tax_amount)
# Amount added through landed-cost-voucher
if flt(d.landed_cost_voucher_amount):
gl_entries.append(self.get_gl_dict({
"account": expenses_included_in_valuation,
"against": warehouse_account[d.warehouse]["name"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.landed_cost_voucher_amount),
"project": d.project
}))
# sub-contracting warehouse
if flt(d.rm_supp_cost) and warehouse_account.get(self.supplier_warehouse):
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[self.supplier_warehouse]["name"],
"against": warehouse_account[d.warehouse]["name"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.rm_supp_cost)
}, warehouse_account[self.supplier_warehouse]["account_currency"]))
# divisional loss adjustment
valuation_amount_as_per_doc = flt(d.base_net_amount, d.precision("base_net_amount")) + \
flt(d.landed_cost_voucher_amount) + flt(d.rm_supp_cost) + flt(d.item_tax_amount)
divisional_loss = flt(valuation_amount_as_per_doc - stock_value_diff,
d.precision("base_net_amount"))
if divisional_loss:
if self.is_return or flt(d.item_tax_amount):
loss_account = expenses_included_in_valuation
else:
loss_account = stock_rbnb
gl_entries.append(self.get_gl_dict({
"account": loss_account,
"against": warehouse_account[d.warehouse]["name"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": divisional_loss,
"project": d.project
}, stock_rbnb_currency))
elif d.warehouse not in warehouse_with_no_account or \
d.rejected_warehouse not in warehouse_with_no_account:
warehouse_with_no_account.append(d.warehouse)
# Cost center-wise amount breakup for other charges included for valuation
valuation_tax = {}
for tax in self.get("taxes"):
if tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
if not tax.cost_center:
frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
valuation_tax.setdefault(tax.cost_center, 0)
valuation_tax[tax.cost_center] += \
(tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)
if negative_expense_to_be_booked and valuation_tax:
# Backward compatibility:
# If expenses_included_in_valuation account has been credited in against PI
# and charges added via Landed Cost Voucher,
# post valuation related charges on "Stock Received But Not Billed"
negative_expense_booked_in_pi = frappe.db.sql("""select name from `tabPurchase Invoice Item` pi
where docstatus = 1 and purchase_receipt=%s
and exists(select name from `tabGL Entry` where voucher_type='Purchase Invoice'
and voucher_no=pi.parent and account=%s)""", (self.name, expenses_included_in_valuation))
if negative_expense_booked_in_pi:
expenses_included_in_valuation = stock_rbnb
against_account = ", ".join([d.account for d in gl_entries if flt(d.debit) > 0])
total_valuation_amount = sum(valuation_tax.values())
amount_including_divisional_loss = negative_expense_to_be_booked
i = 1
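# distribute the negative expense across cost centers pro rata; the last row takes the remainder so rounding differences are not lost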
for cost_center, amount in valuation_tax.items():
if i == len(valuation_tax):
applicable_amount = amount_including_divisional_loss
else:
applicable_amount = negative_expense_to_be_booked * (amount / total_valuation_amount)
amount_including_divisional_loss -= applicable_amount
gl_entries.append(
self.get_gl_dict({
"account": expenses_included_in_valuation,
"cost_center": cost_center,
"credit": applicable_amount,
"remarks": self.remarks or _("Accounting Entry for Stock"),
"against": against_account
})
)
i += 1
if warehouse_with_no_account:
frappe.msgprint(_("No accounting entries for the following warehouses") + ": \n" +
"\n".join(warehouse_with_no_account))
return process_gl_map(gl_entries)
def update_status(self, status):
self.set_status(update=True, status = status)
self.notify_update()
clear_doctype_notifications(self)
def update_billing_status(self, update_modified=True):
updated_pr = [self.name]
for d in self.get("items"):
if d.purchase_order_item:
updated_pr += update_billed_amount_based_on_po(d.purchase_order_item, update_modified)
for pr in set(updated_pr):
pr_doc = self if (pr == self.name) else frappe.get_doc("Purchase Receipt", pr)
pr_doc.update_billing_percentage(update_modified=update_modified)
self.load_from_db()
def update_billed_amount_based_on_po(po_detail, update_modified=True):
# Billed against Purchase Order directly
billed_against_po = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
where po_detail=%s and (pr_detail is null or pr_detail = '') and docstatus=1""", po_detail)
billed_against_po = billed_against_po and billed_against_po[0][0] or 0
# Get all Purchase Receipt Item rows against the Purchase Order Item row
pr_details = frappe.db.sql("""select pr_item.name, pr_item.amount, pr_item.parent
from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
where pr.name=pr_item.parent and pr_item.purchase_order_item=%s
and pr.docstatus=1 and pr.is_return = 0
order by pr.posting_date asc, pr.posting_time asc, pr.name asc""", po_detail, as_dict=1)
updated_pr = []
for pr_item in pr_details:
# Get billed amount directly against Purchase Receipt
billed_amt_against_pr = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
where pr_detail=%s and docstatus=1""", pr_item.name)
billed_amt_against_pr = billed_amt_against_pr and billed_amt_against_pr[0][0] or 0
# Distribute billed amount directly against PO between PRs based on FIFO
if billed_against_po and billed_amt_against_pr < pr_item.amount:
pending_to_bill = flt(pr_item.amount) - billed_amt_against_pr
if pending_to_bill <= billed_against_po:
billed_amt_against_pr += pending_to_bill
billed_against_po -= pending_to_bill
else:
billed_amt_against_pr += billed_against_po
billed_against_po = 0
frappe.db.set_value("Purchase Receipt Item", pr_item.name, "billed_amt", billed_amt_against_pr, update_modified=update_modified)
updated_pr.append(pr_item.parent)
return updated_pr
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
if len(target.get("items")) == 0:
frappe.throw(_("All items have already been invoiced"))
doc = frappe.get_doc(target)
doc.ignore_pricing_rule = 1
doc.run_method("set_missing_values")
doc.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
doclist = get_mapped_doc("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
},
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"purchase_order_item": "po_detail",
"purchase_order": "purchase_order",
},
"postprocess": update_item,
"filter": lambda d: abs(d.qty) - abs(invoiced_qty_map.get(d.name, 0))<=0
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
def get_invoiced_qty_map(purchase_receipt):
"""returns a map: {pr_detail: invoiced_qty}"""
invoiced_qty_map = {}
for pr_detail, qty in frappe.db.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
where purchase_receipt=%s and docstatus=1""", purchase_receipt):
if not invoiced_qty_map.get(pr_detail):
invoiced_qty_map[pr_detail] = 0
invoiced_qty_map[pr_detail] += qty
return invoiced_qty_map
@frappe.whitelist()
def make_purchase_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Purchase Receipt", source_name, target_doc)
@frappe.whitelist()
def update_purchase_receipt_status(docname, status):
pr = frappe.get_doc("Purchase Receipt", docname)
pr.update_status(status)
|
KanchanChauhan/erpnext
|
erpnext/stock/doctype/purchase_receipt/purchase_receipt.py
|
Python
|
gpl-3.0
| 16,742
|
#!/usr/bin/env python
'''Tree View/Tree Store
The GtkTreeStore is used to store data in tree form, to be used
later on by a GtkTreeView to display it. This demo builds a simple
GtkTreeStore and displays it. If you're new to the GtkTreeView widgets
and associates, look into the GtkListStore example first.'''
# pygtk version: Maik Hertha <maik.hertha@berlin.de>
import gobject
import gtk
# columns
(
HOLIDAY_NAME_COLUMN,
ALEX_COLUMN,
HAVOC_COLUMN,
TIM_COLUMN,
OWEN_COLUMN,
DAVE_COLUMN,
VISIBLE_COLUMN,
WORLD_COLUMN,
NUM_COLUMNS
) = range(9)
# tree data
january = \
[
["New Years Day", True, True, True, True, False, True ],
["Presidential Inauguration", False, True, False, True, False, False ],
["Martin Luther King Jr. day", False, True, False, True, False, False ]
]
february = \
[
[ "Presidents' Day", False, True, False, True, False, False ],
[ "Groundhog Day", False, False, False, False, False, False ],
[ "Valentine's Day", False, False, False, False, True, True ]
]
march = \
[
[ "National Tree Planting Day", False, False, False, False, False, False ],
[ "St Patrick's Day", False, False, False, False, False, True ]
]
april = \
[
[ "April Fools' Day", False, False, False, False, False, True ],
[ "Army Day", False, False, False, False, False, False ],
[ "Earth Day", False, False, False, False, False, True ],
[ "Administrative Professionals' Day", False, False, False, False, False, False ]
]
may = \
[
[ "Nurses' Day", False, False, False, False, False, False ],
[ "National Day of Prayer", False, False, False, False, False, False ],
[ "Mothers' Day", False, False, False, False, False, True ],
[ "Armed Forces Day", False, False, False, False, False, False ],
[ "Memorial Day", True, True, True, True, False, True ]
]
june = \
[
[ "June Fathers' Day", False, False, False, False, False, True ],
[ "Juneteenth(Liberation of Slaves)", False, False, False, False, False, False ],
[ "Flag Day", False, True, False, True, False, False ]
]
july = \
[
[ "Parents' Day", False, False, False, False, False, True ],
[ "Independence Day", False, True, False, True, False, False ]
]
august = \
[
[ "Air Force Day", False, False, False, False, False, False ],
[ "Coast Guard Day", False, False, False, False, False, False ],
[ "Friendship Day", False, False, False, False, False, False ]
]
september = \
[
[ "Grandparents' Day", False, False, False, False, False, True ],
[ "Citizenship Day or Constitution Day", False, False, False, False, False, False ],
[ "Labor Day", True, True, True, True, False, True ]
]
october = \
[
[ "National Children's Day", False, False, False, False, False, False ],
[ "Bosses' Day", False, False, False, False, False, False ],
[ "Sweetest Day", False, False, False, False, False, False ],
[ "Mother-in-Law's Day", False, False, False, False, False, False ],
[ "Navy Day", False, False, False, False, False, False ],
[ "Columbus Day", False, True, False, True, False, False ],
[ "Halloween", False, False, False, False, False, True ]
]
november = \
[
[ "Marine Corps Day", False, False, False, False, False, False ],
[ "Veterans' Day", True, True, True, True, False, True ],
[ "Thanksgiving", False, True, False, True, False, False ]
]
december = \
[
[ "Pearl Harbor Remembrance Day", False, False, False, False, False, False ],
[ "Christmas", True, True, True, True, False, True ],
[ "Kwanzaa", False, False, False, False, False, False ]
]
toplevel = \
[
["January", False, False, False, False, False, False, january],
["February", False, False, False, False, False, False, february],
["March", False, False, False, False, False, False, march],
["April", False, False, False, False, False, False, april],
["May", False, False, False, False, False, False, may],
["June", False, False, False, False, False, False, june],
["July", False, False, False, False, False, False, july],
["August", False, False, False, False, False, False, august],
["September", False, False, False, False, False, False, september],
["October", False, False, False, False, False, False, october],
["November", False, False, False, False, False, False, november],
["December", False, False, False, False, False, False, december]
]
class TreeStoreDemo(gtk.Window):
def __init__(self, parent=None):
gtk.Window.__init__(self)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.set_title(self.__class__.__name__)
self.set_default_size(650, 400)
self.set_border_width(8)
vbox = gtk.VBox(False, 8)
self.add(vbox)
label = gtk.Label("Jonathan's Holiday Card Planning Sheet")
vbox.pack_start(label, False, False)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
vbox.pack_start(sw)
# create model
model = self.__create_model()
# create treeview
treeview = gtk.TreeView(model)
treeview.set_rules_hint(True)
self.__add_columns(treeview)
sw.add(treeview)
# expand all rows after the treeview widget has been realized
treeview.connect('realize', lambda tv: tv.expand_all())
self.show_all()
def __create_model(self):
# create tree store
model = gtk.TreeStore(
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN)
# add data to the tree store
for month in toplevel:
iter = model.append(None)
model.set(iter,
HOLIDAY_NAME_COLUMN, month[HOLIDAY_NAME_COLUMN],
ALEX_COLUMN, False,
HAVOC_COLUMN, False,
TIM_COLUMN, False,
OWEN_COLUMN, False,
DAVE_COLUMN, False,
VISIBLE_COLUMN, False,
WORLD_COLUMN, False
)
# add children
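# holiday rows carry no VISIBLE entry, so the world flag sits at index WORLD_COLUMN - 1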
for holiday in month[-1]:
child_iter = model.append(iter)
model.set(child_iter,
HOLIDAY_NAME_COLUMN, holiday[HOLIDAY_NAME_COLUMN],
ALEX_COLUMN, holiday[ALEX_COLUMN],
HAVOC_COLUMN, holiday[HAVOC_COLUMN],
TIM_COLUMN, holiday[TIM_COLUMN],
OWEN_COLUMN, holiday[OWEN_COLUMN],
DAVE_COLUMN, holiday[DAVE_COLUMN],
VISIBLE_COLUMN, True,
WORLD_COLUMN, holiday[WORLD_COLUMN-1]
)
return model
def on_item_toggled(self, cell, path_str, model):
# get selected column
column = cell.get_data('column')
# get toggled iter
iter = model.get_iter_from_string(path_str)
toggle_item = model.get_value(iter, column)
# do something with the value
toggle_item = not toggle_item
# set new value
model.set(iter, column, toggle_item)
def __add_columns(self, treeview):
model = treeview.get_model()
# column for holiday names
renderer = gtk.CellRendererText()
renderer.set_property("xalign", 0.0)
#col_offset = gtk.TreeViewColumn("Holiday", renderer, text=HOLIDAY_NAME_COLUMN)
column = gtk.TreeViewColumn("Holiday", renderer, text=HOLIDAY_NAME_COLUMN)
#column = gtk_tree_view_get_column(GTK_TREE_VIEW(treeview), col_offset - 1);
column.set_clickable(True)
treeview.append_column(column)
# alex column
renderer = gtk.CellRendererToggle()
renderer.set_property("xalign", 0.0)
renderer.set_data("column", ALEX_COLUMN)
renderer.connect("toggled", self.on_item_toggled, model)
column = gtk.TreeViewColumn("Alex", renderer, active=ALEX_COLUMN,
visible=VISIBLE_COLUMN, activatable=WORLD_COLUMN)
# set this column to a fixed sizing(of 50 pixels)
#column = gtk_tree_view_get_column(GTK_TREE_VIEW(treeview), col_offset - 1);
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(50)
column.set_clickable(True)
treeview.append_column(column)
# havoc column
renderer = gtk.CellRendererToggle()
renderer.set_property("xalign", 0.0)
renderer.set_data("column", HAVOC_COLUMN)
renderer.connect("toggled", self.on_item_toggled, model)
column = gtk.TreeViewColumn("Havoc", renderer, active=HAVOC_COLUMN,
visible=VISIBLE_COLUMN)
#column = treeview.get_column(col_offset - 1)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(50)
column.set_clickable(True)
treeview.append_column(column)
# tim column
renderer = gtk.CellRendererToggle()
renderer.set_property("xalign", 0.0)
renderer.set_data("column", TIM_COLUMN)
renderer.connect("toggled", self.on_item_toggled, model)
column = gtk.TreeViewColumn("Tim", renderer, active=TIM_COLUMN,
visible=VISIBLE_COLUMN, activatable=WORLD_COLUMN)
#column = treeview.get_column(col_offset - 1)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(50)
column.set_clickable(True)
treeview.append_column(column)
# owen column
renderer = gtk.CellRendererToggle()
renderer.set_property("xalign", 0.0)
renderer.set_data("column", OWEN_COLUMN)
renderer.connect("toggled", self.on_item_toggled, model)
column = gtk.TreeViewColumn("Owen", renderer, active=OWEN_COLUMN,
visible=VISIBLE_COLUMN)
#column = treeview.get_column(col_offset - 1)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(50)
column.set_clickable(True)
treeview.append_column(column)
# dave column
renderer = gtk.CellRendererToggle()
renderer.set_property("xalign", 0.0)
renderer.set_data("column", DAVE_COLUMN)
renderer.connect("toggled", self.on_item_toggled, model)
column = gtk.TreeViewColumn("Dave", renderer, active=DAVE_COLUMN,
visible=VISIBLE_COLUMN)
#column = treeview.get_column(col_offset - 1)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(50)
column.set_clickable(True)
treeview.append_column(column)
def main():
TreeStoreDemo()
gtk.main()
if __name__ == '__main__':
main()
|
chriskmanx/qmole
|
QMOLEDEV/pygtk-2.16.0/examples/pygtk-demo/demos/tree_store.py
|
Python
|
gpl-3.0
| 11,036
|
# Code404_Server - The serverside stuff and site for Code404_Server
# Copyright (C) 2015 Mitame, Doctor_N
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import create_engine, MetaData, sql
from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, Binary, ForeignKey, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine("sqlite:///data.db")
def get_count(query):
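# issue a SELECT COUNT(*) over the query's statement instead of fetching every row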
count_q = query.statement.with_only_columns([func.count()]).order_by(None)
count = query.session.execute(count_q).scalar()
return count
class Setting(Base):
__tablename__ = "settings"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(32), primary_key=True)
value = Column(String(128))
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, autoincrement=True)
login = Column(String(32), unique=True)
username = Column(String(32))
passhash = Column(Binary(32))
public = Column(Boolean)
def to_xml(self):
return "<user name='%s'>" % self.username
class Group(Base):
__tablename__ = "groups"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(32), primary_key=True)
owner = Column(ForeignKey(User.id))
public = Column(Boolean)
class GroupLink(Base):
__tablename__ = "grouplinks"
id = Column(Integer, primary_key=True, autoincrement=True)
user = Column(ForeignKey(User.id))
group = Column(ForeignKey(Group.id))
class Level(Base):
__tablename__ = "levels"
id = Column(Integer, primary_key=True, autoincrement=True)
creator = Column(ForeignKey(User.id))
name = Column(String(32))
timestamp = Column(Integer)
public = Column(Boolean)
class Token(Base):
__tablename__ = "tokens"
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(ForeignKey(User.id))
token = Column(Binary(32))
expire = Column(DateTime)
class Subscription(Base):
__tablename__ = "subscriptions"
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(ForeignKey(User.id))
level_id = Column(ForeignKey(Level.id))
class Score(Base):
__tablename__ = "scores"
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(ForeignKey(User.id))
level_id = Column(ForeignKey(Level.id))
score = Column(Integer)
Base.metadata.create_all(engine)
Session = sessionmaker()
Session.configure(bind=engine)
|
CyboticCatfish/code404-server
|
code404/database.py
|
Python
|
gpl-3.0
| 3,190
|
#!/usr/bin/env python3
'''
Test the class in a function.
The instance will be created twice if the format_string function is invoked twice.
'''
def format_string(string, formatter=None):
"""Format a string using the formatter object,which is expected to have a format() emthod that accepts a string.
"""
class DefaultFormatter:
"""Format a string in title case"""
def format(self, string):
return str(string).title()
if not formatter:
formatter = DefaultFormatter()
print("Create a formatter instance!")
print("formatter id = {}".format(id(formatter)))
return formatter.format(string)
hello_string = "hello world, how are you today?"
hello_string2 = "hello mike, how are you going?"
print("input: "+ hello_string)
print("output:"+ format_string(hello_string))
print("")
print("input: "+ hello_string2)
print("output:"+ format_string(hello_string2))
|
jingwangian/tutorial
|
python/class/format_string.py
|
Python
|
gpl-3.0
| 936
|
import collections
'''
--- Part Two ---
Of course, that would be the message - if you hadn't agreed to use a modified repetition code instead.
In this modified code, the sender instead transmits what looks like random data, but for each character, the
character they actually want to send is slightly less likely than the others. Even after signal-jamming noise, you
can look at the letter distributions in each column and choose the least common letter to reconstruct the original
message.
In the above example, the least common character in the first column is a; in the second, d, and so on. Repeating
this process for the remaining characters produces the original message, advent.
Given the recording in your puzzle input and this new decoding methodology, what is the original message that Santa
is trying to send?
Your puzzle answer was hnfbujie.
'''
file_name = 'input.txt'
file_lines = [line.rstrip('\n') for line in open(file_name)]
num_list = []
for _ in range(len(file_lines[0])):
num_list.append([])
for line in file_lines:
for index, char in enumerate(line):
num_list[index].append(char)
for column in num_list:
letter = collections.Counter(column).most_common()[-1][0]
print(letter, end='')
print()
|
tetrismegistus/advent16
|
day6/day6_2.py
|
Python
|
gpl-3.0
| 1,318
|
''' Module to handle plugins including a template class for writing
plugins.
Programmer: Matts Bjorck
Last changed: 2008 07 23
'''
import os
import wx, StringIO, traceback
from utils import PluginHandler
head, tail = os.path.split(__file__)
# Look only at the file name and not the extension since
# the extension can be pyc if compiled...
__FILENAME__ = tail.split('.')[0]
# This assumes that the plugin is under the current dir; may need
# changing
__MODULE_DIR__ = head
class Template:
    ''' A template class for handling plugins. Note that using the
    New* methods will automatically remove the created items when the plugin
    is removed. Otherwise the programmer, if he/she makes more advanced
    changes in the gui, has to take care of the deletion of objects.
    '''
    # TODO: Implement Bind/Unbind methods.
    # TODO: Add a Load data function. Needs a change in data as well.
def __init__(self, parent):
'''__init__(self, parent)
This method should be overloaded.
'''
self.parent = parent
self.plot_pages = []
self.input_pages = []
self.data_pages = []
self.menus = []
def NewPlotFolder(self, name, pos = -1):
'''NewPlotFolder(self, name) --> wx.Panel
Creates a new Folder in the Plot part of the panels. Returns
a wx.Panel which can be used to create custom controls.
        Do not forget to use RemovePlotfolder in the Destroy method.
'''
panel = wx.Panel(self.parent.plot_notebook, -1)
self.parent.plot_notebook.AddPage(panel, name)
index = self.parent.plot_notebook.GetPageCount()-1
self.plot_pages.append(index)
return panel
def NewInputFolder(self, name, pos = -1):
'''NewInputFolder(self, name, pos = -1) --> wx.Panel
Creates a new Folder in the Input part of the panels. Returns
a wx.Panel which can be used to create custom controls.
        Do not forget to use RemoveInputfolder in the Destroy method.
'''
panel = wx.Panel(self.parent.input_notebook, -1)
self.parent.input_notebook.AddPage(panel, name)
index = self.parent.input_notebook.GetPageCount()-1
self.input_pages.append(index)
return panel
def NewDataFolder(self, name, pos = -1):
'''NewDataFolder(self, name, pos = -1) --> wx.Panel
Creates a new Folder in the data part of the panels. Returns
a wx.Panel which can be used to create custom controls.
        Do not forget to use RemoveInputfolder in the Destroy method.
'''
panel = wx.Panel(self.parent.data_notebook, -1)
self.parent.data_notebook.AddPage(panel, name)
index = self.parent.data_notebook.GetPageCount()-1
self.data_pages.append(index)
return panel
def NewMenu(self, name):
'''NewMenu(self, name) --> wx.Menu
        Creates a top menu that can be used to control the plugin. Remember
        to also implement RemoveMenu in the Destroy method.
'''
menu = wx.Menu()
self.parent.main_frame_menubar.Append(menu, name)
index = self.parent.main_frame_menubar.GetMenuCount()-1
self.menus.append(index)
return menu
def StatusMessage(self, text):
'''StatusMessage(self, text) --> None
        Method that sets the status text in the main window.
'''
self.parent.main_frame_statusbar.SetStatusText(text, 1)
def ShowErrorDialog(self, message):
'''ShowErrorDialog(self, message) --> None
Shows an error dialog with message [string]
'''
ShowErrorDialog(self.parent, message)
def ShowInfoDialog(self, message):
'''ShowInfoDialog(self, message) --> None
Shows an info dialog with message [string]
'''
ShowInfoDialog(self.parent, message)
def ShowWarningDialog(self, message):
'''ShowWarningDialog(self, message) --> None
        Shows a warning dialog with message [string]
'''
ShowWarningDialog(self.parent, message)
def ShowQuestionDialog(self, message):
        '''ShowQuestionDialog(self, message) --> bool
        Shows a question dialog with message [string]; returns True if the
        user answers yes.
'''
return ShowQuestionDialog(self.parent, message)
def GetModel(self):
'''GetModel(self) --> model
        Returns the model currently in use. This is a reference to the model
        object, so it will always contain the newest information.
'''
return self.parent.model
def GetSolverControl(self):
'''GetSolverControl(self) --> solver_control
Returns the solver_control object that controls all aspects of
        the computational part of the fitting.
'''
return self.parent.solver_control
def SetModelScript(self, script):
'''SetModelScript(self, script) --> None
        Sets the script of the current model. This overwrites the current
        script.
'''
self.parent.script_editor.SetText(script)
self.parent.model.set_script(script)
def CompileScript(self):
'''CompileScript(self) --> None
Compiles the model script
'''
self.parent.model.compile_script()
def OnNewModel(self, event):
'''OnNewModel(self) --> None
Function to be overridden. Called when a new model is being created.
'''
pass
def OnDataChanged(self, event):
'''OnDataChanged(self) --> None
Function to be overridden. Called when a new data set has been loaded
or deleted.
'''
pass
def OnOpenModel(self, event):
'''OnOpenModel(self, event) --> None
Function that is called after a new model has been loaded.
Used to set up plugin specific model stuff. To be overridden
'''
pass
def OnSimulate(self, event):
'''OnSimulate(self, event) --> None
Function that is called after a simulation has been done.
To be overridden
'''
pass
def OnFittingUpdate(self, event):
'''OnFittingUpdate(self, event) --> None
Function that is called when the fitting algorithm pushes an update event.
To be overridden
'''
pass
def Remove(self):
'''Remove(self) --> None
Removes all components.
'''
self.plot_pages.reverse()
self.input_pages.reverse()
self.data_pages.reverse()
# remove all pages from the notebooks
for i in self.plot_pages:
self.parent.plot_notebook.DeletePage(i)
for i in self.input_pages:
self.parent.input_notebook.DeletePage(i)
#print 'deleted page', i
for i in self.data_pages:
self.parent.data_notebook.DeletePage(i)
self.menus.reverse()
# Remove the menus
for i in self.menus:
self.parent.main_frame_menubar.Remove(i)
#END: Template
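# A minimal sketch of a concrete plugin built on Template (hypothetical names;
# a real plugin lives in the add_ons directory and is loaded by the
# PluginController below):
#
# class Plugin(Template):
#     def __init__(self, parent):
#         Template.__init__(self, parent)
#         self.panel = self.NewPlotFolder('MyPlugin')
#         self.menu = self.NewMenu('MyPlugin')
#     def OnSimulate(self, event):
#         self.StatusMessage('MyPlugin: simulation done')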
#==============================================================================
#==============================================================================
class PluginController:
''' A controller class to interact with the gui
so we can load and unload modules as well as
update the module list.
'''
def __init__(self, parent, menu, config):
        '''__init__(self, parent, menu, config) --> None
        Inserts menu items for controlling plugins in menu.
        Parent is the main window.
'''
self.plugin_handler = PluginHandler(parent, __MODULE_DIR__ \
, 'add_ons')
self.parent = parent
self.config = config
# make the menus
self.load_menu = wx.Menu()
menu.InsertMenu(0, -1,'Load', self.load_menu, 'Load a plugin')
self.unload_menu = wx.Menu()
        menu.InsertMenu(1, -1, 'Unload', self.unload_menu, 'Unload a plugin')
menu.Append(-1, 'Update module list')
self.update_plugins()
def update_plugins(self):
        '''update_plugins(self) --> None
Updates the list of modules that can be loaded.
'''
# Remove all the items in load_menu
items = self.load_menu.GetMenuItems()
        for item in items:
            self.load_menu.DeleteItem(item)
# Get the new list of plugin modules
modlist = self.plugin_handler.get_plugins()
modlist.sort()
# Add new menu items
for mod in modlist:
menu = self.load_menu.Append(-1, mod)
self.parent.Bind(wx.EVT_MENU, self.LoadPlugin, menu)
self.update_config()
def RegisterPlugin(self, plugin):
''' RegisterPlugin(self, plugin) --> None
Adds a plugin to the unload list so that it can be removed later.
'''
menu = self.unload_menu.Append(-1, plugin)
self.parent.Bind(wx.EVT_MENU, self.UnLoadPlugin, menu)
self.update_plugins()
def update_config(self):
'''update_config(self) --> None
Updates the config object
'''
loaded_plugins = self.plugin_handler.get_loaded_plugins()
plugins_str = ';'.join(loaded_plugins)
self.config.set('plugins', 'loaded plugins', plugins_str)
def LoadDefaultPlugins(self):
'''LoadDefaultPlugins(self) --> None
Tries to load the default plugins from the config object
if they are not already loaded.
'''
plugin_str = self.config.get('plugins', 'loaded plugins')
        # Check that we have any plugins to load, else bail out
if plugin_str == '':
return
existing_plugins = self.plugin_handler.get_possible_plugins()
for plugin in plugin_str.split(';'):
            # Check that the plugin is not loaded and that it exists
if not self.plugin_handler.is_loaded(plugin):
if plugin in existing_plugins:
try:
self.plugin_handler.load_plugin(plugin)
except:
outp = StringIO.StringIO()
traceback.print_exc(200, outp)
tbtext = outp.getvalue()
outp.close()
ShowErrorDialog(self.parent, 'Can NOT load plugin '\
+ plugin + '\nPython traceback below:\n\n' + tbtext)
self.RegisterPlugin(plugin)
else:
ShowInfoDialog(self.parent, 'Could not find plugin "%s"'\
'. Either there is an error in the config file'\
' or the plugin is not installed.'%(plugin))
self.update_config()
# Callbacks
def LoadPlugin(self, event):
        '''LoadPlugin(self, event) --> None
Loads a plugin from a menu choice.
'''
# Get the name of the plugin
menuitem = self.load_menu.FindItemById(event.GetId())
plugin = menuitem.GetText()
try:
self.plugin_handler.load_plugin(plugin)
except:
outp = StringIO.StringIO()
traceback.print_exc(200, outp)
tbtext = outp.getvalue()
outp.close()
ShowErrorDialog(self.parent, 'Can NOT load plugin ' + plugin\
+ '\nPython traceback below:\n\n' + tbtext)
else:
self.RegisterPlugin(plugin)
#if event:
# # Do not forget - pass the event on
# event.Skip()
def UnLoadPlugin(self, event):
'''UnLoadPlugin(self, event) --> None
UnLoads (removes) a plugin module.
'''
menuitem = self.unload_menu.FindItemById(event.GetId())
plugin = menuitem.GetText()
try:
self.plugin_handler.unload_plugin(plugin)
except Exception, e:
outp = StringIO.StringIO()
traceback.print_exc(200, outp)
tbtext = outp.getvalue()
outp.close()
ShowErrorDialog(self.parent, 'Can NOT unload plugin object'+ \
plugin + '\nPython traceback below:\n\n' + tbtext)
else:
# Remove the item from the list
self.unload_menu.DeleteItem(menuitem)
# Update the available plugins
self.update_plugins()
#if event:
# # Do not forget - pass the event on
# event.Skip()
def OnNewModel(self, event):
'''OnNewModel(self, event) --> None
Runs plugin code when the user tries to load a new model
'''
for name in self.plugin_handler.loaded_plugins:
self.plugin_handler.loaded_plugins[name].OnNewModel(event)
#if event:
# # Do not forget - pass the event on
# event.Skip()
def OnDataChanged(self, event):
        '''OnDataChanged(self, event) --> None
Runs plugin code when the user tries to load new data
'''
for name in self.plugin_handler.loaded_plugins:
self.plugin_handler.loaded_plugins[name].OnDataChanged(event)
#if event:
# # Do not forget - pass the event on
# event.Skip()
def OnOpenModel(self, event):
'''OnOpenModel(self, event) --> None
Runs plugin code when the user tries to open a model
'''
for name in self.plugin_handler.loaded_plugins:
self.plugin_handler.loaded_plugins[name].OnOpenModel(event)
self.LoadDefaultPlugins()
#if event:
# # Do not forget - pass the event on
# event.Skip()
def OnSimulate(self, event):
        '''OnSimulate(self, event) --> None
Runs plugin code when the user has simulated the model
'''
for name in self.plugin_handler.loaded_plugins:
self.plugin_handler.loaded_plugins[name].OnSimulate(event)
#if event:
# # Do not forget - pass the event on
# event.Skip()
def OnFittingUpdate(self, event):
        '''OnFittingUpdate(self, event) --> None
Runs plugin code when the Fitting algorithm wants to update its output.
'''
for name in self.plugin_handler.loaded_plugins:
self.plugin_handler.loaded_plugins[name].OnFittingUpdate(event)
#if event:
# # Do not forget - pass the event on
# event.Skip()
#==============================================================================
# Utility Dialog functions..
def ShowInfoDialog(frame, message):
dlg = wx.MessageDialog(frame, message,
'Information',
wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
def ShowErrorDialog(frame, message, position = ''):
dlg = wx.MessageDialog(frame, message,
'ERROR',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
def ShowWarningDialog(frame, message):
dlg = wx.MessageDialog(frame, message, 'Warning',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
def ShowQuestionDialog(frame, message, title = 'Question'):
dlg = wx.MessageDialog(frame, message,
title,
wx.YES_NO | wx.ICON_QUESTION
)
result = dlg.ShowModal() == wx.ID_YES
dlg.Destroy()
return result
|
joshp123/genx
|
plugins/add_on_framework.py
|
Python
|
gpl-3.0
| 16,107
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
import random
import itertools
import scipy.misc
import os
import cv2 as cv
import numpy as np
import tensorflow as tf
slim=tf.contrib.slim
import matplotlib.pyplot as plt
# %matplotlib inline  # Only for IPython
class gameOb():
def __init__(self,coordinates,size,intensity,channel,reward,name):
self.x=coordinates[0]
self.y=coordinates[1]
self.size=size
self.intensity=intensity
self.channel=channel
self.reward=reward
self.name=name
class gameEnv():
def __init__(self,size):
        self.sizeX = size
        self.sizeY = size
self.actions=4
self.objects=[]
self.window="env"
a=self.reset()
#cv.imshow(self.window,a)
#cv.waitKey(5)
#plt.imshow(a,interpolation="nearest")
def newPosition(self):
iterables=[range(self.sizeX),range(self.sizeY)]
points=[]
        for t in itertools.product(*iterables):  # Cartesian product of all grid coordinates
points.append(t)
currentPositions=[]
for objectA in self.objects:
if (objectA.x,objectA.y) not in currentPositions:
currentPositions.append((objectA.x,objectA.y))
for pos in currentPositions:
points.remove(pos)
#print(len(points))
location =np.random.choice(range(len(points)),replace=False)
#print("Pos: ",points[location])
return points[location]
def checkGoal(self):
others=[]
for obj in self.objects:
if obj.name=='hero':
hero=obj
else:
others.append(obj)
for other in others:
if hero.x==other.x and hero.y==other.y:
self.objects.remove(other)
if other.reward==1:
self.objects.append(gameOb(self.newPosition(),1,1,1,1,'goal'))
else:
self.objects.append(gameOb(self.newPosition(),1,1,0,-1,'fire'))
return other.reward,False
return 0.0,False
def renderEnv(self):
a=np.ones([self.sizeY+2,self.sizeX+2,3])
a[1:-1,1:-1,:]=0
hero=None
for item in self.objects:
a[item.y+1:item.y+item.size+1,item.x+1:item.x+item.size+1,item.channel]=item.intensity
b=scipy.misc.imresize(a[:,:,0],[84,84,1],interp='nearest')
c=scipy.misc.imresize(a[:,:,1],[84,84,1],interp='nearest')
d=scipy.misc.imresize(a[:,:,2],[84,84,1],interp='nearest')
a=np.stack([b,c,d],axis=2)
return a
def step(self,action):
self.moveChar(action)
reward,done=self.checkGoal()
state=self.renderEnv()
return state,reward,done
def reset(self):
self.objects=[]
hero=gameOb(self.newPosition(),1,1,2,None,'hero')
self.objects.append(hero)
goal=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal)
hole=gameOb(self.newPosition(),1,1,0,-1,"fire")
self.objects.append(hole)
goal2=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal2)
hole2=gameOb(self.newPosition(),1,1,0,-1,"fire")
self.objects.append(hole2)
goal3=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal3)
goal4=gameOb(self.newPosition(),1,1,1,1,"goal")
self.objects.append(goal4)
state=self.renderEnv()
self.state=state
return state
def moveChar(self,direction):
hero=self.objects[0]
heroX=hero.x
heroY=hero.y
if direction==0 and hero.y>=1:
hero.y-=1
if direction==1 and hero.y<self.sizeY-2:
hero.y+=1
if direction==2 and hero.x>=1:
hero.x-=1
if direction==3 and hero.x<self.sizeX-2:
hero.x+=1
self.objects[0]=hero
env=gameEnv(size=5)
class Qnetwork():
def __init__(self,h_size):
self.scalarInput=tf.placeholder(shape=[None,21168],dtype=tf.float32)
self.imageIn=tf.reshape(self.scalarInput,shape=[-1,84,84,3])
self.conv1=tf.contrib.layers.convolution2d(inputs=self.imageIn,num_outputs=32,kernel_size=[8,8],stride=[4,4],padding='VALID',biases_initializer=None)
self.conv2=tf.contrib.layers.convolution2d(inputs=self.conv1,num_outputs=64,kernel_size=[4,4],stride=[2,2],padding='VALID',biases_initializer=None)
self.conv3=tf.contrib.layers.convolution2d(inputs=self.conv2,num_outputs=64,kernel_size=[3,3],stride=[1,1],padding='VALID',biases_initializer=None)
self.conv4=tf.contrib.layers.convolution2d(inputs=self.conv3,num_outputs=h_size,kernel_size=[7,7],stride=[1,1],padding='VALID',biases_initializer=None)
        self.streamAC, self.streamVC = tf.split(self.conv4, 2, 3)  # TF >= 1.0 argument order. A: the value generated from the action (advantage), V: the value of the environment (state value)
self.streamA= tf.contrib.layers.flatten(self.streamAC)
self.streamV= tf.contrib.layers.flatten(self.streamVC)
self.AW=tf.Variable(tf.random_normal([h_size//2,env.actions],dtype=tf.float32))
self.VW=tf.Variable(tf.random_normal([h_size//2,1],dtype=tf.float32))
self.Advantage=tf.matmul(self.streamA,self.AW)
self.Value=tf.matmul(self.streamV,self.VW)
self.Qout=self.Value+tf.subtract(self.Advantage,tf.reduce_mean(self.Advantage,reduction_indices=1,keep_dims=True))
self.predict=tf.argmax(self.Qout,1)
self.targetQ=tf.placeholder(shape=[None],dtype=tf.float32)
self.actions=tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot=tf.one_hot(self.actions,env.actions,dtype=tf.float32)
self.Q=tf.reduce_sum(tf.multiply(self.Qout,self.actions_onehot),reduction_indices=1)
self.td_error=tf.square(self.targetQ-self.Q)
self.loss=tf.reduce_mean(self.td_error)
self.trainer=tf.train.AdamOptimizer(learning_rate=0.0001)
self.updateModel=self.trainer.minimize(self.loss)
class experience_buffer():
def __init__(self,buffer_size=50000):
self.buffer=[]
self.buffer_size=buffer_size
def add(self,experience):
if (len(self.buffer)+len(experience))>=self.buffer_size:
self.buffer[0:len(experience)+len(self.buffer)-self.buffer_size]=[]
self.buffer.extend(experience)
def sample(self,size):
return np.reshape(np.array(random.sample(self.buffer,size)),[size,5])
def processState(states):
return np.reshape(states,[21168])
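# Note: 21168 is the flattened 84*84*3 frame produced by renderEnv.
# updateTargetGraph below builds the soft ("Polyak") target-network update:
# the first half of the trainable variables belongs to the main network, the
# second half to the target network, and each target variable moves a
# fraction tau toward its main-network counterpart.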
def updateTargetGraph(tfVars,tau):
total_var=len(tfVars)
op_holder=[]
for idx,var in enumerate(tfVars[0:total_var//2]):
op_holder.append(
tfVars[idx+total_var//2].assign((var.value()*tau)+(tfVars[idx+total_var//2].value()*(1-tau))))
return op_holder
def updateTarget(op_holder,sess):
for op in op_holder:
sess.run(op)
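# Training hyperparameters: y is the discount factor; e (epsilon) is annealed
# from startE to endE over annealing_steps steps after pre_train_steps of
# pure random exploration; tau is the soft target-update rate used above.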
batch_size=32
update_freq=4
y=.99
startE=1
endE=0.1
annealing_steps=10000.
num_episodes=10000
pre_train_steps=10000
max_epLength=50
load_model=False
path="./dqn"
h_size=512
tau=0.001
mainQN=Qnetwork(h_size)
targetQN=Qnetwork(h_size)
trainables=tf.trainable_variables()
targetOps=updateTargetGraph(trainables,tau)
myBuffer=experience_buffer()
e=startE
stepDrop=(startE-endE)/annealing_steps
rList=[]
total_step=0
saver=tf.train.Saver()
if not os.path.exists(path):
os.mkdir(path)
with tf.Session() as sess:
f=open("./Rewards.txt",'w');
if load_model==True:
print('Loading Model...')
ckpt=tf.train.get_checkpoint_state(path)
saver.restore(sess,ckpt.model_checkpoint_path)
sess.run(tf.global_variables_initializer())
updateTarget(targetOps,sess)
for i in range(num_episodes+1):
episodeBuffer=experience_buffer()
s=env.reset()
s=processState(s)
d=False
rAll=0
j=0
while j<max_epLength:
j+=1
if np.random.rand(1)<e or total_step<pre_train_steps:
a=np.random.randint(0,4)
else:
a=sess.run(mainQN.predict,feed_dict={mainQN.scalarInput: [s]})[0]
s1,r,d=env.step(a)
s1=processState(s1)
total_step+=1
episodeBuffer.add(np.reshape([s,a,r,s1,d],[1,5]))
if total_step>pre_train_steps:
if e>endE:
e-=stepDrop
if total_step%(update_freq)==0:
trainBatch=myBuffer.sample(batch_size)
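                    # Double DQN target: the main network selects the greedy
                    # action for s', while the target network evaluates it.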
A = sess.run(mainQN.predict,feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:,3])})
Q = sess.run(targetQN.Qout,feed_dict={targetQN.scalarInput: np.vstack(trainBatch[:,3])})
doubleQ=Q[range(batch_size),A]
targetQ=trainBatch[:,2]+y*doubleQ
# print(A)
# print(Q)
# print("____")
# print(doubleQ)
sess.run(mainQN.updateModel,feed_dict={mainQN.scalarInput: np.vstack(trainBatch[:,0]),
mainQN.targetQ: targetQ,
mainQN.actions: trainBatch[:,1]})
updateTarget(targetOps,sess)
rAll+=r
s=s1
            if d:
break
myBuffer.add(episodeBuffer.buffer)
rList.append(rAll)
if i>0 and i%25==0:
print('episode',i,', average reward of last 25 episode', np.mean(rList[-25:]))
f.write('%.3f\n'%np.mean(rList[-25:]))
if i>0 and i%1000==0:
saver.save(sess,path+'/model-'+str(i)+'.cptk')
print("Save Model")
f.close()
saver.save(sess,path+"/model-"+str(i)+'.cptk')
|
DrarPan/tensorflow
|
reinforcement_learning/QLearning/GridWorld.py
|
Python
|
gpl-3.0
| 8,451
|
"""
Digest information retrieved from the Weather Underground API
"""
def get_near_stations_by_position(lat, lng):
"""
    Returns a list of (country, city) tuples for all stations near the given location
"""
from .endpoints import geolookup_by_position
data = geolookup_by_position(lat, lng)
stations = [
(data['location']['country'], data['location']['city'])
]
near_stations = data['location']['nearby_weather_stations']
for k in near_stations:
for sensor in near_stations[k]['station']:
stations.append(
(sensor['country'], sensor['city'])
)
return stations
def get_conditions_by_station(stations):
"""
Get current weather conditions for all station in stations
stations should be:
stations = [
(country_code, station_name),
...
]
"""
from .endpoints import conditions
data = []
for station in stations:
d = conditions(station[0], station[1])['current_observation']
data.append({
'station_id' : d['station_id'],
'x' : d['display_location']['longitude'],
'y' : d['display_location']['latitude'],
'z' : d['display_location']['elevation'],
'obs_time' : d['observation_time'],
'temp' : d['temp_c'],
'humidity_relative' : d['relative_humidity'],
'wind_dir' : d['wind_dir'],
'wind_degrees' : d['wind_degrees'],
'wind_mph' : d['wind_mph'],
'wind_kph' : d['wind_kph'],
'pressure_mb' : d['pressure_mb'],
'pressure_in' : d['pressure_in'],
'pressure_trend' : d['pressure_trend'],
"visibility_km" : d['visibility_km'],
"solarradiation" : d['solarradiation'],
"precip_1hr_in" : d['precip_1hr_in'],
"precip_1hr_metric" : d['precip_1hr_metric'],
"precip_today_string" : d['precip_today_string'],
"precip_today_in" : d['precip_today_in'],
"precip_today_metric" : d['precip_today_metric']
})
return data
def conditions_to_shp(lat, lng, shpOut):
"""
Get conditions for the near stations and record them in a shapefile
"""
import os
from gasp.toshp.gdal import array_to_shp
stations = get_near_stations_by_position(lat, lng)
stat_conditions = get_conditions_by_station(stations)
array_to_shp(stat_conditions, shpOut)
return shpOut
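# Example usage (hypothetical coordinates; requires valid Weather Underground
# API credentials configured for the endpoints module):
#   stations = get_near_stations_by_position(38.72, -9.14)
#   conditions = get_conditions_by_station(stations[:3])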
|
JoaquimPatriarca/senpy-for-gis
|
gasp/fromapi/weather/digest.py
|
Python
|
gpl-3.0
| 2,733
|
from nose.tools import *
from virtuback import app
from virtuback import db
from flask import json
from hashlib import sha256
class SetupAPI:
def setUp(self):
print('SETUP!')
app.config['TESTING'] = True
self.client = app.test_client()
self.db = db._client.virtuback_test.users
self.db.insert({
'_id': 1,
'name': 'Vilhelm von Ehrenheim',
'email': 'vonehrenheim@gmail.com',
'password': sha256(b'abc123').hexdigest()
})
self.db.insert({
'_id': 2,
'name': 'Tester Testsson',
'email': 'test@test.com',
'password': sha256(b'qwerty').hexdigest()
})
def tearDown(self):
print('TEAR DOWN!')
self.db.remove()
class TestGET(SetupAPI):
def test_404(self):
res = self.client.get('/api/v1.0/nevertobefound')
assert res.status == "404 NOT FOUND"
def test_get_user_not_there(self):
res = self.client.get('/api/v1.0/user/0')
assert res.status == "404 NOT FOUND"
def test_get_users(self):
res = self.client.get('/api/v1.0/users')
data = json.loads(res.data)
assert len(data['users']) == 2
assert data['users'][0]['name'] == 'Vilhelm von Ehrenheim'
def test_get_user_ok(self):
res = self.client.get('/api/v1.0/user/1')
print(res.data)
data = json.loads(res.data)['user']
assert data['id'] == 1
assert data['name'] == 'Vilhelm von Ehrenheim'
assert data['email'] == 'vonehrenheim@gmail.com'
class TestDELETE(SetupAPI):
def test_delete_user_not_there(self):
res = self.client.delete('/api/v1.0/user/0')
assert res.status == "404 NOT FOUND"
def test_delete_user_ok(self):
# Add a user we can remove
res = self.client.post('/api/v1.0/users', data="""{
"name": "Tmp",
"email": "tmp@test.com",
"password": "thisisavalidpass"
}""", content_type='application/json')
data = json.loads(res.data)['user']
res = self.client.delete('/api/v1.0/user/' + str(data['id']))
assert res.status == "200 OK"
res = self.client.get('/api/v1.0/user/' + str(data['id']))
assert res.status == "404 NOT FOUND"
|
while/virtuback
|
tests/virtuback_tests.py
|
Python
|
gpl-3.0
| 2,340
|
import sys
from settings import *
# Used in an additional process!
class Information:
@staticmethod
def battle(player_party, enemy_party):
sys.stdout.flush()
box_width = GUI_ITEM_WIDTH
gui_width = GUI_WIDTH - 2 * box_width
rwidth = box_width + gui_width
def info_row(*characters, right=False):
print_two = False
if len(characters) == 2:
print_two = True
#name_length = len(character.name)
border = "#" * box_width
tmp = "# {} #"
fmt_width = box_width-(len(tmp)-2)
name = []
health = []
level = []
exp = []
for character in characters:
name.append(tmp.format(character.name.center(fmt_width)))
health.append(tmp.format(f"HP: {int(character.hp)}/{int(character.max_hp)}".center(fmt_width)))
level.append(tmp.format(f"Level: {character.level}".center(fmt_width)))
try:
exp.append(tmp.format(f"EXP: {character.exp}".center(fmt_width)))
except (AttributeError, NotImplementedError):
exp.append(tmp.format(f"EXP: NaN".center(fmt_width)))
if not print_two:
if not right:
return f"{border}\n{name[0]}\n{health[0]}\n{level[0]}\n{exp[0]}\n{border}"
else:
return f"{' '*rwidth}{border}\n{' '*rwidth}{name[0]}\n{' '*rwidth}{health[0]}\n{' '*rwidth}{level[0]}\n{' '*rwidth}{exp[0]}\n{' '*rwidth}{border}"
else:
return f"{border}{' '*gui_width}{border}\n{name[0]}{' '*gui_width}{name[1]}\n{health[0]}{' '*gui_width}{health[1]}\n{level[0]}{' '*gui_width}{level[1]}\n{exp[0]}{' '*gui_width}{exp[1]}\n{border}{' '*gui_width}{border}"
chars = list(sum(zip(player_party, enemy_party+[0]), ())[:-1])
output = [f"\n{'Player party'.center(box_width)}{' '*gui_width}{'Enemy party'.center(box_width)}"]
min_length = min(len(player_party), len(enemy_party))
for i in range(min_length):
output.append(info_row(player_party[i], enemy_party[i]))
        if min_length < len(enemy_party):
            for i in enemy_party[min_length:]:
                output.append(info_row(i, right=True))
        elif min_length < len(player_party):
            for i in player_party[min_length:]:
                output.append(info_row(i))
with open('temp.gui', 'w') as f:
f.write("\n".join(output))
|
Diapolo10/text_rpg
|
src/gui.py
|
Python
|
gpl-3.0
| 2,632
|
# -*- encoding: utf-8 -*-
import random
import string
from django.contrib.auth.models import User, Group, Permission
from django.core.management.base import BaseCommand, CommandError
import click
from event.models import *
class Command(BaseCommand):
help = 'Creates new Make Things event'
def create_group(self):
group = Group.objects.create(name='Organizers')
permissions = [
'add_faq', 'change_faq', 'delete_faq',
'add_sponsor', 'change_sponsor', 'delete_sponsor',
'change_user',
'change_website',
'add_workshop', 'change_workshop', 'delete_workshop',
'add_workshopleader', 'change_workshopleader', 'delete_workshopleader'
]
for permission in permissions:
perm_obj = Permission.objects.get(codename=permission)
group.permissions.add(perm_obj)
group.save()
return group
def handle(self, *args, **options):
#Basics
click.echo("Hello sir or madam! My name is Verynicebot and I'm here to help you create your new Make Things event. So exciting!")
click.echo("Let's start with some basics.")
city = click.prompt("What is the name of the city?")
country = click.prompt("What is the name of the country?")
date = click.prompt("What is the date of the event? (Format: YYYY-MM-DD)")
url = click.prompt("What should be the URL of website? makethings.io/xxxx")
click.echo(u"Ok, got that! Your new event will happen in {0}, {1} on {2}".format(city, country, date))
#Main organizer
team = []
click.echo("Now let's talk about the team. First the main organizer:")
main_name = click.prompt("First and last name")
main_email = click.prompt("E-mail address")
try:
team.append({'first_name': main_name.split(' ')[0], 'last_name': main_name.split(' ')[1], 'email': main_email})
except IndexError:
team.append({'first_name': main_name, 'last_name': '', 'email': main_email})
click.echo(u"All right, the main organizer of Make Things in {0} is {1} ({2})".format(city, main_name, main_email))
#Team
add_team = click.prompt("Do you want to add additional team members? y/n")
i = 1
while add_team != 'n':
i += 1
name = click.prompt("First and last name of #{0} member".format(i))
email = click.prompt("E-mail address of #{0} member".format(i))
if len(name) > 0:
try:
team.append({'first_name': name.split(' ')[0], 'last_name': name.split(' ')[1], 'email': email})
                except IndexError:
                    team.append({'first_name': name, 'last_name': '', 'email': email})
click.echo(u"All right, the #{0} team member of Make Things in {1} is {2} ({3})".format(i, city, name, email))
add_team = click.prompt("Do you want to add additional team members? y/n")
#Save data
click.echo("OK! That's it. Now I'll create your event.")
click.echo("Here is an access info for team members:")
main_organizer = None
members = []
for member in team:
member['password'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
user = User.objects.create(email=member['email'],
first_name=member['first_name'],
last_name=member['last_name'],
is_active=True,
is_staff=True)
user.set_password(member['password'])
user.save()
            try:
                group = Group.objects.get(name='Organizers')
            except Group.DoesNotExist:
                group = self.create_group()
group.user_set.add(user)
if not main_organizer:
main_organizer = user
members.append(user)
click.echo(u"{0} - email: {1} password: {2}".format(member['first_name'], member['email'], member['password']))
event = Event.objects.create(city=city, country=country, main_organizer=main_organizer)
website = Website.objects.create(event=event, url=url, date=date, status=0, about_title=u"Make Things in {0}".format(city), organizers_title=u"Make Things in {0} is organized by".format(city))
for member in members:
website.team.add(member)
member.event = event
member.save()
click.echo(u"Website is ready here: http://makethings.io/{0}".format(url))
click.echo("Congrats on yet another event!")
|
Makerland/makethings.io
|
core/management/commands/newevent.py
|
Python
|
gpl-3.0
| 4,726
|
import webapp2
import os
import math
import logging
from Analyzer import Analyzer
from Measurement import Measurement
from CallAgent import CallAgent
from google.appengine.api import users
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
"""
This class handles the GET from /analyze
It calls its delegate 'Analyzer' which processes and renders the
data in the Datastore.
"""
class AnalyzeHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
analyzer = Analyzer()
analyzer.get(self)
else:
self.redirect(users.create_login_url(self.request.uri))
"""
This class handles the POST request from the board
"""
class MeasurementHandler(webapp2.RequestHandler):
def post(self):
measure = Measurement()
measure.brightness = self.request.get('bright')
measure.temperature = self.request.get('temp')
measure.put()
"""
This class handles requests from Twilio. We don't care
if there's a post or get request.
"""
class CallAgentHandler(webapp2.RequestHandler):
def get(self):
agent = CallAgent()
agent.get(self)
def post(self):
agent = CallAgent()
agent.post(self)
"""
This class renders the standard web page at "/"
Replace this with appropriate content
"""
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, World!')
else:
self.redirect(users.create_login_url(self.request.uri))
application = webapp2.WSGIApplication([
('/', MainPage),
('/analyze', AnalyzeHandler),
('/measure', MeasurementHandler),
('/tell', CallAgentHandler)
], debug=True)
def main():
    logging.getLogger().setLevel(logging.DEBUG)
    # run_wsgi_app is the legacy CGI-style runner from the webapp framework.
    from google.appengine.ext.webapp.util import run_wsgi_app
    run_wsgi_app(application)
if __name__ == '__main__':
main()
|
anawas/wasserblog
|
main.py
|
Python
|
gpl-3.0
| 1,832
|
import re
from dis_snek.client import Snake
from dis_snek.models import Scale, slash_command
from discordbot import command
from discordbot.command import MtgContext
from magic import image_fetcher
from magic.models import Card
class Art(Scale):
@slash_command('art')
@command.slash_card_option()
async def art(self, ctx: MtgContext, card: Card) -> None:
"""Display the artwork of the requested card."""
if card is not None:
file_path = re.sub('.jpg$', '.art_crop.jpg', image_fetcher.determine_filepath([card]))
success = await image_fetcher.download_scryfall_card_image(card, file_path, version='art_crop')
if success:
await ctx.send_image_with_retry(file_path)
else:
await ctx.send('{author}: Could not get image.'.format(author=ctx.author.mention))
art.autocomplete('card')(command.autocomplete_card)
m_art = command.alias_message_command_to_slash_command(art)
def setup(bot: Snake) -> None:
Art(bot)
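# Usage sketch: registered as the /art slash command (and an aliased message
# command); the card argument autocompletes via command.autocomplete_card.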
|
PennyDreadfulMTG/Penny-Dreadful-Tools
|
discordbot/commands/art.py
|
Python
|
gpl-3.0
| 1,032
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 21:52:39 2020
@author: piccinini
"""
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
x = np.loadtxt('eigvalues_heterog.txt')
eig = x[1:,0]
psis = x[1:,1:].T
r = (psis/eig).T
IP = 1e5/np.cumsum(r, axis=0)
# plt.semilogy(IP,'o', mfc='None', ms=3)
plt.plot(np.mean(IP, axis=1),'ko', mfc='None', ms=2)
plt.grid(1)
|
rbpiccinini/codes
|
eigendrake/output/IP.py
|
Python
|
gpl-3.0
| 412
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_dns_nameserver import ApiParameters
from library.modules.bigip_dns_nameserver import ModuleParameters
from library.modules.bigip_dns_nameserver import ModuleManager
from library.modules.bigip_dns_nameserver import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_dns_nameserver import ApiParameters
from ansible.modules.network.f5.bigip_dns_nameserver import ModuleParameters
from ansible.modules.network.f5.bigip_dns_nameserver import ModuleManager
from ansible.modules.network.f5.bigip_dns_nameserver import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
address='10.10.10.10',
service_port=80,
route_domain=20,
tsig_key='key1',
partition='Common'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.address == '10.10.10.10'
assert p.service_port == 80
assert p.route_domain == '/Common/20'
assert p.tsig_key == '/Common/key1'
def test_api_parameters(self):
args = load_fixture('load_ltm_dns_nameserver_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.address == '127.0.0.1'
assert p.service_port == 53
assert p.route_domain == '/Common/0'
assert p.tsig_key == '/Common/key1'
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
address='10.10.10.10',
service_port=80,
route_domain=20,
tsig_key='key1',
partition='Common',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
yfried/ansible
|
test/units/modules/network/f5/test_bigip_dns_nameserver.py
|
Python
|
gpl-3.0
| 3,915
|
#Copyright 2011 Dan Klinedinst
#
#This file is part of Gibson.
#
#Gibson is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License as published by the
#Free Software Foundation, either version 3 of the License, or any
#later version.
#Gibson is distributed in the hope that it will be useful, but WITHOUT
#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
#for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gibson. If not, see <http://www.gnu.org/licenses/>.
from gibson import parse_nmap
from gibson import threedee_math
from gibson.slugs import network
from gibson.views import build
from gibson.ui import keyboard_events
from gibson.ui import gui
from gibson.views import build_subnet
from gibson import config
from gibson.views import node
from gibson.views import adhoc
from gibson.views import blades
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from pandac.PandaModules import *
from direct.gui.OnscreenText import OnscreenText
from direct.gui.OnscreenImage import OnscreenImage
from direct.interval.IntervalGlobal import Sequence
from panda3d.core import Point3, Fog
from direct.showbase import DirectObject
import netaddr
import time
from math import pi, sin, cos
import sys
import xml.sax
from xml.sax.handler import feature_namespaces, ContentHandler
import string
import os
import socket
import re
from optparse import OptionParser
import traceback
from gibson import *
class Panda(ShowBase):
def __init__(self, options):
ShowBase.__init__(self)
self.options = options
if self.options.configfile:
conf_file = self.options.configfile
else:
conf_file = 'None'
configuration = config.ConfigFile(conf_file)
self.slug_speed = int(configuration.slug_speed())
self.slug_timeout = int(configuration.slug_timeout())
self.new_node_counter = 0
self.slugs = {}
self.lasts = {}
self.view = "hybrid"
self.hybridview = NodePath("hybridview")
self.hybridview.reparentTo(render)
self.subnetview = NodePath("subnetview")
self.subnetview.reparentTo(render)
self.subnetview.hide()
self.nodeview = NodePath("nodeview")
self.nodeview.reparentTo(render)
self.nodeview.hide()
self.bladeview = NodePath("bladeview")
self.bladeview.reparentTo(render)
self.bladeview.hide()
self.dummy_center_node = render.attachNewNode("dummy_center_node")
self.dummy_center_node.setPos(0, 0, 0)
self.camera.reparentTo(self.dummy_center_node)
if configuration.bg_color():
bg_color = configuration.bg_color()
colors = bg_color.split(',')
bg_color = (float(colors[0]), float(colors[1]), float(colors[2]))
else:
bg_color = (0.69,0.77,0.88)
self.setBackgroundColor(bg_color)
if configuration.skyshape():
try:
skybox_model = getPath("model", configuration.skyshape())
except:
traceback.print_exc()
skybox_model = None
if not skybox_model:
skybox_model = getPath("model", "skybox.egg")
try:
self.skybox = self.loader.loadModel(skybox_model)
#self.skybox.setScale(500) # make big enough to cover whole terrain, else there'll be problems with the water reflections
#self.skybox.reparentTo(render)
#self.camLens.setFar(70)
except:
traceback.print_exc()
print "Skybox Model not found"
if configuration.skybox_texture():
try:
texture = getPath("image", configuration.skybox_texture())
except:
texture = None
if not texture:
texture= getPath("image", "sky.jpg")
try:
self.skyboxTexture = self.loader.loadTexture(texture)
except:
print "Skybox texture not found."
#self.skyboxTexture.setWrapU(Texture.WMRepeat)
#self.skyboxTexture.setWrapV(Texture.WMRepeat)
self.skybox.reparentTo(self.cam)
#self.skybox.setScale(10)
self.skybox.setScale(.01)
self.skybox.setX(1.4)
self.skybox.setCompass()
self.camLens.setFar(500)
#myFog = Fog("Fog Name")
#myFog.setColor(.8,.8,.8)
#myFog.setExpDensity(.01)
#myFog.setLinearRange(125,500)
#myFog.setLinearFallback(4,160,320)
#self.cam.attachNewNode(myFog)
#render.setFog(myFog)
#print self.skybox.getTightBounds()
self.skybox.setTexture(self.skyboxTexture, 1)
#self.skybox.setH(60)
self.skybox.setBin('background', 1)
self.skybox.setDepthWrite(0)
self.skybox.setLightOff()
self.skybox.setCollideMask(BitMask32.allOff())
self.disableMouse()
self.useDrive()
base.drive.node().setIgnoreMouse(1)
base.drive.node().setPos(-10, -160, 9)
base.drive.node().setHpr(340, 0, 0)
plight = DirectionalLight('my plight')
plnp = self.hybridview.attachNewNode(plight)
plnp.setHpr(310, 0 ,30)
self.hybridview.setLight(plnp)
alight = AmbientLight('alight')
alight.setColor(VBase4(0.4, 0.4, 0.4, 1))
alnp = self.hybridview.attachNewNode(alight)
self.hybridview.setLight(alnp)
#keys = keyboard_events.KeyboardEvents(base.drive.node(), self)
#self.model = build.BuildModel(self.options.configfile)
#self.model.map_servers(self, parse_nmap.networkMap)
#self.taskMgr.add(self.followCameraTask, "FollowCameraTask")
#self.low_x = {}
#self.high_x = {}
# Get Mouse Clicks
self.myHandler = CollisionHandlerQueue()
self.myTraverser = CollisionTraverser()
self.myTraverser.setRespectPrevTransform(True)
# Uncomment following line to make collisions (mouse clicks) visible on screen
#self.myTraverser.showCollisions(render)
pickerNode = CollisionNode('mouseRay')
pickerNP = camera.attachNewNode(pickerNode)
pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
self.pickerRay = CollisionRay()
pickerNode.addSolid(self.pickerRay)
self.myTraverser.addCollider(pickerNP, self.myHandler)
# Receive events
self.cManager = QueuedConnectionManager()
self.cReader = QueuedConnectionReader(self.cManager, 0)
self.cWriter = ConnectionWriter(self.cManager,0)
activeConnections=[]
udpSocket = self.cManager.openUDPConnection(1723)
self.cReader.addConnection(udpSocket)
self.taskMgr.add(self.tskReaderPolling,"Poll the connection reader",-40)
self.taskMgr.add(self.tskDestroySlugs, "Destroy slugs whose time has expired", 1)
# Create GUI and switch modes / views
keys = keyboard_events.KeyboardEvents(base.drive.node(), self)
# Build the hybrid view (default) model
if configuration.autobuild() == "true":
self.model = build.BuildModel(self.options.configfile)
self.model.map_servers(self, parse_nmap.networkMap)
self.model.map_routers(self.options.configfile)
#self.taskMgr.add(self.followCameraTask, "FollowCameraTask")
m = MouseClick()
interface = gui.KeyboardEvents(keys, self.model, base.drive.node(), self)
# Build the subnet view model
self.subnet = build_subnet.BuildSubnetModel(base.drive.node(), self, conf_file)
#self.hybridview.hide()
#blade = blades.BladeView(self)
def tskReaderPolling(self,taskdata):
if self.cReader.dataAvailable():
datagram=NetDatagram()
if self.cReader.getData(datagram):
data = datagram.getMessage().split("|")
id_builder = (data[1], data[3], data[4], data[5], data[6])
id = "".join(id_builder)
for i in self.model.master_zone_array:
for j in i:
if self.is_member_subnet(data[3], j.split()):
if data[3] not in self.model.servers:
#print data[3] + "is in" + j
adhoc.NewServer(data[3], self)
if self.is_member_subnet(data[5], j.split()):
if data[5] not in self.model.servers:
#print data[5] + "is in" + j
adhoc.NewServer(data[5], self)
try:
self.slugs[id] = network.createSlug(self, data, self.subnet)
except:
traceback.print_exc()
if self.view == "node" and self.single_node.IP == data[3]:
node_event = node.NodeEvent(data, self)
return Task.cont
def tskDestroySlugs(self, taskdata):
for slug in self.slugs.itervalues():
try:
if time.time() - float(slug.node.getTag("createTime")) > self.slug_timeout:
slug.node.removeNode()
except:
pass
return Task.cont
def followCameraTask(self, task):
#visible_range = {}
#for k, v in self.model.servers.iteritems():
# v.show()
return task.cont
def moveSlugsTask(self, task):
#for k, v in self.slugs.iteritems():
# if self.view == "hybrid":
# self.slugs[k].pingpong.loop()
return Task.cont
def find_high_x(self, k, coords, visible_range, task):
# These aren't currently used because, well, they don't work
for i in self.model.subnet_list:
if self.model.is_member_subnet(k, i.split()):
if coords[0] < visible_range[i][0]:
visible_range[i] = (coords[0], visible_range[i][1])
if coords[0] > visible_range[i][1]:
visible_range[i] = (visible_range[i][0], coords[0])
for i in self.model.private_net_list:
if self.model.is_member_subnet(k, i.split()):
if coords[0] < visible_range[i][0]:
visible_range[i] = (coords[0], visible_range[i][1])
if coords[0] > visible_range[i][1]:
visible_range[i] = (visible_range[i][0], coords[0])
def find_low_x(self, k, coords, visible_range, task):
# Ditto
for i in self.model.subnet_list:
if self.model.is_member_subnet(k, i.split()):
if coords[0] > visible_range[i][0]:
visible_range[i] = (coords[0], visible_range[i][1])
if coords[0] < visible_range[i][1]:
visible_range[i] = (visible_range[i][0], coords[0])
for i in self.model.private_net_list:
if self.model.is_member_subnet(k, i.split()):
if coords[0] > visible_range[i][0]:
visible_range[i] = (coords[0], visible_range[i][1])
if coords[0] < visible_range[i][1]:
visible_range[i] = (visible_range[i][0], coords[0])
def objectClicked(self):
mpos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
self.myTraverser.traverse(render)
# Assume for simplicity's sake that myHandler is a CollisionHandlerQueue.
if self.myHandler.getNumEntries() > 0:
for i in range(self.myHandler.getNumEntries()):
entry = self.myHandler.getEntry(i)
self.myHandler.sortEntries()
pickedObj = self.myHandler.getEntry(0).getIntoNodePath()
obj_id = pickedObj.getNetTag('myObjectTag')
print obj_id
try:
pickedObj2 = self.myHandler.getEntry(1).getIntoNodePath()
obj_id2 = pickedObj2.getNetTag('myObjectTag')
#print obj_id2
except:
pass
try:
pickedObj3 = self.myHandler.getEntry(2).getIntoNodePath()
obj_id3 = pickedObj3.getNetTag('myObjectTag')
#print obj_id3
except:
pass
pickedObj = pickedObj.findNetTag('myObjectTag')
if not pickedObj.isEmpty():
if obj_id == "PopUp":
print "Pop Up"
if pickedObj.getAncestor(1).getNetTag("type") == "Tunnel":
pickedObj.removeNode()
else:
pickedObj.removeNode()
elif obj_id == "ServerPopUp":
pickedObj.removeNode()
elif (re.search("^[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*$", str(obj_id))):
self.findClickedServer(pickedObj, str(obj_id))
else:
self.findClickedSlug(pickedObj, str(obj_id))
def objectRightClicked(self):
mpos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
self.myTraverser.traverse(render)
# Assume for simplicity's sake that myHandler is a CollisionHandlerQueue.
if self.myHandler.getNumEntries() > 0:
for i in range(self.myHandler.getNumEntries()):
entry = self.myHandler.getEntry(i)
self.myHandler.sortEntries()
pickedObj = self.myHandler.getEntry(0).getIntoNodePath()
obj_id = pickedObj.getNetTag('myObjectTag')
pickedObj = pickedObj.findNetTag('myObjectTag')
if not pickedObj.isEmpty():
if (re.search("^[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*$", str(obj_id))):
self.goToNodeView(pickedObj, str(obj_id))
def findClickedSlug(self, slug, slug_id):
for i in self.slugs.itervalues():
try:
match = ":".join(i.data[1:5])
abc = i.node
except:
continue
if (match == slug_id):
info = TextNode(str(slug_id))
text = "\n".join(i.data[1:])
info.clearTextColor()
info.setText(text)
info.setCardAsMargin(0, 0, 0.5, 0)
info.setCardColor(1.0, 1.0, 1.0, 0.7)
info.setTextColor(1.0, 0.0, 0.0, 1.0)
info.setFrameAsMargin(0, 0, 0.5, 0)
info.setFrameColor(0.0, 0.0, 0.0, .9)
info.setCardDecal(True)
clickable = info.generate()
self.popup = self.hybridview.attachNewNode(clickable)
self.popup.reparentTo(i.node)
self.popup.setH(270)
#self.popup.setScale(0.25)
x, y, z = i.node.getPos()
#self.popup.setPos(-3, 3, 3)
self.popup.setTag('myObjectTag', 'PopUp')
self.popup.setLightOff()
if i.node.getNetTag('type') == "Tunnel":
self.popup.setPos(0.5, -1, 2)
self.popup.setScale(0.025, 0.05, 0.167)
self.popup.setColorScale(0, 0, 0, 0.9)
#self.popup.setBillboardAxis()
self.popup.setH(self.camera, 150)
self.popup.setCompass(self.camera)
else:
self.popup.setScale(0.10)
self.popup.setPos(3, 5, 1)
self.popup.setH(270)
#self.popup.setH(self.camera, 150)
#self.popup.setCompass(self.camera)
#i.node.setScale(1)
def findClickedServer(self, server, IP):
info = TextNode(IP)
try:
hostname = socket.gethostbyaddr(IP)[0]
except socket.herror:
hostname = "Unknown"
        os_class = parse_nmap.networkMap[IP].osclass
        text = hostname[:8] + "\n" + IP + "\n" + os_class
for i in parse_nmap.networkMap[IP].services:
text += "\n" + str(i[0]) + "/" + str(i[1])
print text
info.setText(text)
info.setCardAsMargin(0, 0, 0.5, 0)
info.setCardColor(1.0, 1.0, 1.0, 0.7)
info.setTextColor(0.0, 0.0, 0.0, 1.0)
info.setFrameAsMargin(0, 0, 0.5, 0)
info.setFrameColor(0.0, 0.0, 0.0, .9)
info.setCardDecal(True)
clickable = info.generate()
self.popup = self.hybridview.attachNewNode(clickable)
self.popup.reparentTo(server)
#self.popup.setH(270)
#self.popup.setScale(0.5)
self.popup.setPos(-3, -5, 0)
self.popup.setTag('myObjectTag', 'ServerPopUp')
self.popup.setLightOff()
def goToNodeView(self, original_object, IP):
self.single_node = node.NodeView(self, IP, parse_nmap.networkMap[IP].services)
self.view = "node"
self.hybridview.hide()
self.subnetview.hide()
self.nodeview.show()
def is_member_subnet(self, IP, subnets):
try:
if netaddr.all_matching_cidrs(IP, subnets):
return True
except:
return False
class MouseClick(DirectObject.DirectObject):
def __init__(self):
self.accept('mouse1', self.leftClick)
self.accept('space', self.leftClick)
self.accept('mouse3', self.rightClick)
def leftClick(self):
scene.objectClicked()
def rightClick(self):
scene.objectRightClicked()
|
robscetury/gibson
|
lib/gibson/programs/network.py
|
Python
|
gpl-3.0
| 17,688
|
# -*- coding: utf-8 -*-
"""
Registrar
~~~~~
copyright: (c) 2014 by Halfmoon Labs, Inc.
copyright: (c) 2015 by Blockstack.org
This file is part of Registrar.
Registrar is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Registrar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Registrar. If not, see <http://www.gnu.org/licenses/>.
"""
try:
from config_local import *
except Exception as e:
print e
print "using default config"
import os
DEBUG = True
# --------------------------------------------------
NAMECOIND_READONLY = False
NAMECOIND_USE_HTTPS = True
NAMECOIND_PORT = os.environ['NAMECOIND_PORT']
NAMECOIND_SERVER = os.environ['NAMECOIND_SERVER']
NAMECOIND_USER = os.environ['NAMECOIND_USER']
NAMECOIND_PASSWD = os.environ['NAMECOIND_PASSWD']
#WALLET_PASSPHRASE = os.environ['WALLET_PASSPHRASE']
#--------------------------------------------------
MONGODB_URI = os.environ['MONGODB_URI']
OLD_DB = os.environ['OLD_DB']
AWSDB_URI = os.environ['AWSDB_URI']
MONGOLAB_URI = os.environ['MONGOLAB_URI']
DEFAULT_HOST = '127.0.0.1'
MEMCACHED_PORT = '11211'
MEMCACHED_TIMEOUT = 15 * 60
FRONTEND_SECRET = os.environ['FRONTEND_SECRET']
|
john-light/registrar
|
registrar/config.py
|
Python
|
gpl-3.0
| 1,715
|
#!/usr/bin/python3
# main.py
#
# Copyright (C) 2015 Yaroslav Strukevich <ah2.two.2o@gmal.com>
#
# This file is part of User Friendly Linux Package Manager.
#
# User Friendly Linux Package Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# User Friendly Linux Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with User Friendly Linux Package Manager. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import subprocess
import re
import hashlib
def install(package, deep, version):
return 0
def update(package, deep, version, package_version):
    # version/package_version are taken as parameters (assumed signature);
    # both names are otherwise undefined in this module.
    if version > package_version:
        install(package, deep, version)
    return 0
def purge(package):
return 0
def sync():
return 0
def sync_overlay():
return 0
def check_config():
return 0
def find():
    # With shell=True the command must be a single string so the shell
    # expands the $UFLD / $UFLOD environment variables.
    cmd = 'find "$UFLD" "$UFLOD" -name "python*" -depth'
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    out, err = popen.communicate()
    out = out.decode()
    if len(out) < 3:
        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        out, err = popen.communicate()
        out = out.decode()
        if len(out) < 3:
            return "Could not find the package in the repository or overlay"
    mas = out.split('\n')
    return mas
def checksum(locate):
    with open(locate + "/Manifest", 'r') as f:
        mas = [{'hash': x.split()[1], 'version': x.split()[0]}
               for x in f.read().split('\n') if x.strip()]
    # f2 = open(...)  # the second file to verify against the manifest is not specified
    digest = hashlib.sha512(b"Nobody inspects the spammish repetition").hexdigest()
    return 0
find()
|
BloodySucker/uflpm
|
main.py
|
Python
|
gpl-3.0
| 2,129
|
import sys
# Let's do some dep checking and handle missing ones gracefully
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import Qt
import PyQt4.QtCore as QtCore
except ImportError:
print "You need to have PyQT installed to run Electrum in graphical mode."
print "If you have pip installed try 'sudo pip install pyqt' if you are on Debian/Ubuntu try 'sudo apt-get install python-qt4'."
sys.exit(0)
from decimal import Decimal as D
from electrum_myr.util import get_resource_path as rsrc
from electrum_myr.bitcoin import is_valid
from electrum_myr.i18n import _
import decimal
import json
import os.path
import random
import re
import time
from electrum_myr.wallet import Wallet, WalletStorage
import webbrowser
import history_widget
import receiving_widget
from electrum_myr import util
import datetime
from electrum_myr.version import ELECTRUM_VERSION as electrum_version
from electrum_myr.util import format_satoshis, age
from main_window import ElectrumWindow
import shutil
from util import *
bitcoin = lambda v: v * 100000000
def IconButton(filename, parent=None):
pixmap = QPixmap(filename)
icon = QIcon(pixmap)
return QPushButton(icon, "", parent)
def resize_line_edit_width(line_edit, text_input):
metrics = QFontMetrics(qApp.font())
# Create an extra character to add some space on the end
text_input += "A"
line_edit.setMinimumWidth(metrics.width(text_input))
def load_theme_name(theme_path):
try:
with open(os.path.join(theme_path, "name.cfg")) as name_cfg_file:
return name_cfg_file.read().rstrip("\n").strip()
except IOError:
return None
def theme_dirs_from_prefix(prefix):
if not os.path.exists(prefix):
return []
theme_paths = {}
for potential_theme in os.listdir(prefix):
theme_full_path = os.path.join(prefix, potential_theme)
theme_css = os.path.join(theme_full_path, "style.css")
if not os.path.exists(theme_css):
continue
theme_name = load_theme_name(theme_full_path)
if theme_name is None:
continue
theme_paths[theme_name] = prefix, potential_theme
return theme_paths
def load_theme_paths():
theme_paths = {}
theme_paths.update(theme_dirs_from_prefix(util.data_dir()))
return theme_paths
class TransactionWindow(QDialog):
def set_label(self):
label = unicode(self.label_edit.text())
self.parent.wallet.labels[self.tx_id] = label
super(TransactionWindow, self).accept()
def __init__(self, transaction_id, parent):
super(TransactionWindow, self).__init__()
self.tx_id = str(transaction_id)
self.parent = parent
self.setModal(True)
self.resize(200,100)
self.setWindowTitle(_("Transaction successfully sent"))
self.layout = QGridLayout(self)
history_label = "%s\n%s" % (_("Your transaction has been sent."), _("Please enter a label for this transaction for future reference."))
self.layout.addWidget(QLabel(history_label))
self.label_edit = QLineEdit()
self.label_edit.setPlaceholderText(_("Transaction label"))
self.label_edit.setObjectName("label_input")
self.label_edit.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.label_edit.setFocusPolicy(Qt.ClickFocus)
self.layout.addWidget(self.label_edit)
self.save_button = QPushButton(_("Save"))
self.layout.addWidget(self.save_button)
self.save_button.clicked.connect(self.set_label)
self.exec_()
class MiniWindow(QDialog):
def __init__(self, actuator, expand_callback, config):
super(MiniWindow, self).__init__()
self.actuator = actuator
self.config = config
self.btc_balance = None
self.use_exchanges = ["Blockchain", "CoinDesk"]
self.quote_currencies = ["BRL", "CNY", "EUR", "GBP", "RUB", "USD"]
self.actuator.set_configured_currency(self.set_quote_currency)
self.actuator.set_configured_exchange(self.set_exchange)
# Needed because price discovery is done in a different thread
# which needs to be sent back to this main one to update the GUI
self.connect(self, SIGNAL("refresh_balance()"), self.refresh_balance)
self.balance_label = BalanceLabel(self.change_quote_currency, self)
self.balance_label.setObjectName("balance_label")
# Bitcoin address code
self.address_input = QLineEdit()
self.address_input.setPlaceholderText(_("Enter a Myriadcoin address or contact"))
self.address_input.setObjectName("address_input")
self.address_input.setFocusPolicy(Qt.ClickFocus)
self.address_input.textChanged.connect(self.address_field_changed)
resize_line_edit_width(self.address_input,
"1BtaFUr3qVvAmwrsuDuu5zk6e4s2rxd2Gy")
self.address_completions = QStringListModel()
address_completer = QCompleter(self.address_input)
address_completer.setCaseSensitivity(False)
address_completer.setModel(self.address_completions)
self.address_input.setCompleter(address_completer)
address_layout = QHBoxLayout()
address_layout.addWidget(self.address_input)
self.amount_input = QLineEdit()
self.amount_input.setPlaceholderText(_("... and amount") + " (%s)"%self.actuator.g.base_unit())
self.amount_input.setObjectName("amount_input")
self.amount_input.setFocusPolicy(Qt.ClickFocus)
# This is changed according to the user's displayed balance
self.amount_validator = QDoubleValidator(self.amount_input)
self.amount_validator.setNotation(QDoubleValidator.StandardNotation)
self.amount_validator.setDecimals(8)
self.amount_input.setValidator(self.amount_validator)
# This removes the very ugly OSX highlighting, please leave this in :D
self.address_input.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.amount_input.setAttribute(Qt.WA_MacShowFocusRect, 0)
self.amount_input.textChanged.connect(self.amount_input_changed)
#if self.actuator.g.wallet.seed:
self.send_button = QPushButton(_("&Send"))
#else:
# self.send_button = QPushButton(_("&Create"))
self.send_button.setObjectName("send_button")
        self.send_button.setDisabled(True)
self.send_button.clicked.connect(self.send)
# Creating the receive button
self.switch_button = QPushButton( QIcon(":icons/switchgui.png"),'' )
self.switch_button.setMaximumWidth(25)
self.switch_button.setFlat(True)
self.switch_button.clicked.connect(expand_callback)
main_layout = QGridLayout(self)
main_layout.addWidget(self.balance_label, 0, 0, 1, 3)
main_layout.addWidget(self.switch_button, 0, 3)
main_layout.addWidget(self.address_input, 1, 0, 1, 4)
main_layout.addWidget(self.amount_input, 2, 0, 1, 2)
main_layout.addWidget(self.send_button, 2, 2, 1, 2)
self.send_button.setMaximumWidth(125)
self.history_list = history_widget.HistoryWidget()
self.history_list.setObjectName("history")
self.history_list.hide()
self.history_list.setAlternatingRowColors(True)
main_layout.addWidget(self.history_list, 3, 0, 1, 4)
self.receiving = receiving_widget.ReceivingWidget(self)
self.receiving.setObjectName("receiving")
# Add to the right side
self.receiving_box = QGroupBox(_("Select a receiving address"))
extra_layout = QGridLayout()
# Checkbox to filter used addresses
hide_used = QCheckBox(_('Hide used addresses'))
hide_used.setChecked(True)
hide_used.stateChanged.connect(self.receiving.toggle_used)
# Events for receiving addresses
self.receiving.clicked.connect(self.receiving.copy_address)
self.receiving.itemDoubleClicked.connect(self.receiving.edit_label)
self.receiving.itemChanged.connect(self.receiving.update_label)
# Label
extra_layout.addWidget( QLabel(_('Selecting an address will copy it to the clipboard.') + '\n' + _('Double clicking the label will allow you to edit it.') ),0,0)
extra_layout.addWidget(self.receiving, 1,0)
extra_layout.addWidget(hide_used, 2,0)
extra_layout.setColumnMinimumWidth(0,200)
self.receiving_box.setLayout(extra_layout)
main_layout.addWidget(self.receiving_box,0,4,-1,3)
self.receiving_box.hide()
self.main_layout = main_layout
quit_shortcut = QShortcut(QKeySequence("Ctrl+Q"), self)
quit_shortcut.activated.connect(self.close)
close_shortcut = QShortcut(QKeySequence("Ctrl+W"), self)
close_shortcut.activated.connect(self.close)
g = self.config.get("winpos-lite",[4, 25, 351, 149])
self.setGeometry(g[0], g[1], g[2], g[3])
show_hist = self.config.get("gui_show_history",False)
self.show_history(show_hist)
show_hist = self.config.get("gui_show_receiving",False)
self.toggle_receiving_layout(show_hist)
self.setWindowIcon(QIcon(":icons/electrum-myr.png"))
self.setWindowTitle("Electrum-MYR")
self.setWindowFlags(Qt.Window|Qt.MSWindowsFixedSizeDialogHint)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self.setObjectName("main_window")
def context_menu(self):
view_menu = QMenu()
themes_menu = view_menu.addMenu(_("&Themes"))
selected_theme = self.actuator.selected_theme()
theme_group = QActionGroup(self)
for theme_name in self.actuator.theme_names():
theme_action = themes_menu.addAction(theme_name)
theme_action.setCheckable(True)
if selected_theme == theme_name:
theme_action.setChecked(True)
class SelectThemeFunctor:
def __init__(self, theme_name, toggle_theme):
self.theme_name = theme_name
self.toggle_theme = toggle_theme
def __call__(self, checked):
if checked:
self.toggle_theme(self.theme_name)
delegate = SelectThemeFunctor(theme_name, self.toggle_theme)
theme_action.toggled.connect(delegate)
theme_group.addAction(theme_action)
view_menu.addSeparator()
show_receiving = view_menu.addAction(_("Show Receiving addresses"))
show_receiving.setCheckable(True)
show_receiving.toggled.connect(self.toggle_receiving_layout)
show_receiving.setChecked(self.config.get("gui_show_receiving",False))
show_history = view_menu.addAction(_("Show History"))
show_history.setCheckable(True)
show_history.toggled.connect(self.show_history)
show_history.setChecked(self.config.get("gui_show_history",False))
return view_menu
def toggle_theme(self, theme_name):
self.actuator.change_theme(theme_name)
# Recompute style globally
qApp.style().unpolish(self)
qApp.style().polish(self)
def closeEvent(self, event):
g = self.geometry()
self.config.set_key("winpos-lite", [g.left(),g.top(),g.width(),g.height()],True)
self.actuator.g.closeEvent(event)
qApp.quit()
def pay_from_URI(self, URI):
try:
dest_address, amount, label, message, request_url = util.parse_URI(URI)
except:
return
self.address_input.setText(dest_address)
self.address_field_changed(dest_address)
self.amount_input.setText(str(amount))
def activate(self):
pass
def deactivate(self):
pass
def set_exchange(self, use_exchange):
if use_exchange not in self.use_exchanges:
return
self.use_exchanges.remove(use_exchange)
self.use_exchanges.insert(0, use_exchange)
self.refresh_balance()
def set_quote_currency(self, currency):
"""Set and display the fiat currency country."""
if currency not in self.quote_currencies:
return
self.quote_currencies.remove(currency)
self.quote_currencies.insert(0, currency)
self.refresh_balance()
def change_quote_currency(self, forward=True):
if forward:
self.quote_currencies = \
self.quote_currencies[1:] + self.quote_currencies[0:1]
else:
self.quote_currencies = \
self.quote_currencies[-1:] + self.quote_currencies[0:-1]
self.actuator.set_config_currency(self.quote_currencies[0])
self.refresh_balance()
def refresh_balance(self):
if self.btc_balance is None:
# Price has been discovered before wallet has been loaded
# and server connect... so bail.
return
self.set_balances(self.btc_balance)
self.amount_input_changed(self.amount_input.text())
def set_balances(self, btc_balance):
"""Set the bitcoin balance and update the amount label accordingly."""
self.btc_balance = btc_balance
quote_text = self.create_quote_text(btc_balance)
if quote_text:
quote_text = "(%s)" % quote_text
amount = self.actuator.g.format_amount(btc_balance)
unit = self.actuator.g.base_unit()
self.balance_label.set_balance_text(amount, unit, quote_text)
self.setWindowTitle("Electrum %s - %s %s" % (electrum_version, amount, unit))
def amount_input_changed(self, amount_text):
"""Update the number of bitcoins displayed."""
self.check_button_status()
try:
amount = D(str(amount_text)) * (10**self.actuator.g.decimal_point)
except decimal.InvalidOperation:
self.balance_label.show_balance()
else:
quote_text = self.create_quote_text(amount)
if quote_text:
self.balance_label.set_amount_text(quote_text)
self.balance_label.show_amount()
else:
self.balance_label.show_balance()
    def create_quote_text(self, btc_balance):
        """Return the fiat value of the given coin amount as a string."""
from electrum_myr.plugins import run_hook
r = {}
run_hook('get_fiat_balance_text', btc_balance, r)
return r.get(0,'')
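        # The fiat text is supplied by a plugin through run_hook, which fills
        # r[0] with something like "12.34 USD" (illustrative value only).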
def send(self):
if self.actuator.send(self.address_input.text(),
self.amount_input.text(), self):
self.address_input.setText("")
self.amount_input.setText("")
def check_button_status(self):
"""Check that the bitcoin address is valid and that something
is entered in the amount before making the send button clickable."""
try:
value = D(str(self.amount_input.text())) * (10**self.actuator.g.decimal_point)
except decimal.InvalidOperation:
value = None
# self.address_input.property(...) returns a qVariant, not a bool.
# The == is needed to properly invoke a comparison.
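        # (Illustrative: a bare truthiness test on the returned qVariant may
        # not reflect the stored bool, so the explicit == comparison is kept.)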
if (self.address_input.property("isValid") == True and
value is not None and 0 < value <= self.btc_balance):
self.send_button.setDisabled(False)
else:
self.send_button.setDisabled(True)
def address_field_changed(self, address):
# label or alias, with address in brackets
match2 = re.match("(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>",
address)
if match2:
address = match2.group(2)
self.address_input.setText(address)
if is_valid(address):
self.check_button_status()
self.address_input.setProperty("isValid", True)
self.recompute_style(self.address_input)
else:
self.send_button.setDisabled(True)
self.address_input.setProperty("isValid", False)
self.recompute_style(self.address_input)
if len(address) == 0:
self.address_input.setProperty("isValid", None)
self.recompute_style(self.address_input)
def recompute_style(self, element):
self.style().unpolish(element)
self.style().polish(element)
def copy_address(self):
receive_popup = ReceivePopup(self.receive_button)
self.actuator.copy_address(receive_popup)
def update_completions(self, completions):
self.address_completions.setStringList(completions)
def update_history(self, tx_history):
self.history_list.empty()
for item in tx_history[-10:]:
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
label = self.actuator.g.wallet.get_label(tx_hash)[0]
v_str = self.actuator.g.format_amount(value, True)
self.history_list.append(label, v_str, age(timestamp))
def the_website(self):
webbrowser.open("http://myr.electr.us")
def toggle_receiving_layout(self, toggle_state):
if toggle_state:
self.receiving_box.show()
else:
self.receiving_box.hide()
self.config.set_key("gui_show_receiving", toggle_state)
def show_history(self, toggle_state):
if toggle_state:
self.main_layout.setRowMinimumHeight(3,200)
self.history_list.show()
else:
self.main_layout.setRowMinimumHeight(3,0)
self.history_list.hide()
self.config.set_key("gui_show_history", toggle_state)
class BalanceLabel(QLabel):
SHOW_CONNECTING = 1
SHOW_BALANCE = 2
SHOW_AMOUNT = 3
    def __init__(self, change_quote_currency, parent=None):
        super(BalanceLabel, self).__init__(_("Connecting..."), parent)
self.change_quote_currency = change_quote_currency
self.state = self.SHOW_CONNECTING
self.balance_text = ""
self.amount_text = ""
self.parent = parent
def mousePressEvent(self, event):
"""Change the fiat currency selection if window background is clicked."""
if self.state != self.SHOW_CONNECTING:
if event.button() == Qt.LeftButton:
self.change_quote_currency()
else:
position = event.globalPos()
menu = self.parent.context_menu()
menu.exec_(position)
def set_balance_text(self, amount, unit, quote_text):
"""Set the amount of bitcoins in the gui."""
if self.state == self.SHOW_CONNECTING:
self.state = self.SHOW_BALANCE
self.balance_text = "<span style='font-size: 18pt'>%s</span>"%amount\
+ " <span style='font-size: 10pt'>%s</span>" % unit \
+ " <span style='font-size: 10pt'>%s</span>" % quote_text
if self.state == self.SHOW_BALANCE:
self.setText(self.balance_text)
def set_amount_text(self, quote_text):
self.amount_text = "<span style='font-size: 10pt'>%s</span>" % quote_text
if self.state == self.SHOW_AMOUNT:
self.setText(self.amount_text)
def show_balance(self):
if self.state == self.SHOW_AMOUNT:
self.state = self.SHOW_BALANCE
self.setText(self.balance_text)
def show_amount(self):
if self.state == self.SHOW_BALANCE:
self.state = self.SHOW_AMOUNT
self.setText(self.amount_text)
def ok_cancel_buttons(dialog):
row_layout = QHBoxLayout()
row_layout.addStretch(1)
ok_button = QPushButton(_("OK"))
row_layout.addWidget(ok_button)
ok_button.clicked.connect(dialog.accept)
cancel_button = QPushButton(_("Cancel"))
row_layout.addWidget(cancel_button)
cancel_button.clicked.connect(dialog.reject)
return row_layout
class PasswordDialog(QDialog):
    def __init__(self, parent):
        super(PasswordDialog, self).__init__(parent)
self.setModal(True)
self.password_input = QLineEdit()
self.password_input.setEchoMode(QLineEdit.Password)
main_layout = QVBoxLayout(self)
message = _('Please enter your password')
main_layout.addWidget(QLabel(message))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Password')), 1, 0)
grid.addWidget(self.password_input, 1, 1)
main_layout.addLayout(grid)
main_layout.addLayout(ok_cancel_buttons(self))
self.setLayout(main_layout)
def run(self):
if not self.exec_():
return
return unicode(self.password_input.text())
class ReceivePopup(QDialog):
def leaveEvent(self, event):
self.close()
def setup(self, address):
label = QLabel(_("Copied your Myriadcoin address to the clipboard!"))
address_display = QLineEdit(address)
address_display.setReadOnly(True)
resize_line_edit_width(address_display, address)
main_layout = QVBoxLayout(self)
main_layout.addWidget(label)
main_layout.addWidget(address_display)
self.setMouseTracking(True)
self.setWindowTitle("Electrum - " + _("Receive Myriadcoin payment"))
self.setWindowFlags(Qt.Window|Qt.FramelessWindowHint|
Qt.MSWindowsFixedSizeDialogHint)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
#self.setFrameStyle(QFrame.WinPanel|QFrame.Raised)
#self.setAlignment(Qt.AlignCenter)
def popup(self):
parent = self.parent()
top_left_pos = parent.mapToGlobal(parent.rect().bottomLeft())
self.move(top_left_pos)
center_mouse_pos = self.mapToGlobal(self.rect().center())
QCursor.setPos(center_mouse_pos)
self.show()
class MiniActuator:
"""Initialize the definitions relating to themes and
sending/receiving bitcoins."""
def __init__(self, main_window):
"""Retrieve the gui theme used in previous session."""
self.g = main_window
self.theme_name = self.g.config.get('litegui_theme','Cleanlook')
self.themes = load_theme_paths()
self.load_theme()
def load_theme(self):
"""Load theme retrieved from wallet file."""
try:
theme_prefix, theme_path = self.themes[self.theme_name]
except KeyError:
util.print_error("Theme not found!", self.theme_name)
return
full_theme_path = "%s/%s/style.css" % (theme_prefix, theme_path)
with open(full_theme_path) as style_file:
qApp.setStyleSheet(style_file.read())
    def theme_names(self):
        """Return the theme names, sorted alphabetically."""
        return sorted(self.themes.keys())
    def selected_theme(self):
        """Return the name of the currently selected theme."""
        return self.theme_name
def change_theme(self, theme_name):
"""Change theme."""
self.theme_name = theme_name
self.g.config.set_key('litegui_theme',theme_name)
self.load_theme()
def set_configured_exchange(self, set_exchange):
use_exchange = self.g.config.get('use_exchange')
if use_exchange is not None:
set_exchange(use_exchange)
def set_configured_currency(self, set_quote_currency):
"""Set the inital fiat currency conversion country (USD/EUR/GBP) in
the GUI to what it was set to in the wallet."""
currency = self.g.config.get('currency')
# currency can be none when Electrum is used for the first
# time and no setting has been created yet.
if currency is not None:
set_quote_currency(currency)
def set_config_exchange(self, conversion_exchange):
self.g.config.set_key('exchange',conversion_exchange,True)
self.g.update_status()
def set_config_currency(self, conversion_currency):
"""Change the wallet fiat currency country."""
self.g.config.set_key('currency',conversion_currency,True)
self.g.update_status()
    def copy_address(self, receive_popup):
        """Copy one recent receiving address to the clipboard and show a popup."""
addrs = [addr for addr in self.g.wallet.addresses(True)
if not self.g.wallet.is_change(addr)]
# Select most recent addresses from gap limit
addrs = addrs[-self.g.wallet.gap_limit:]
copied_address = random.choice(addrs)
qApp.clipboard().setText(copied_address)
receive_popup.setup(copied_address)
receive_popup.popup()
def waiting_dialog(self, f):
s = Timer()
s.start()
w = QDialog()
w.resize(200, 70)
w.setWindowTitle('Electrum-MYR')
l = QLabel(_('Sending transaction, please wait.'))
vbox = QVBoxLayout()
vbox.addWidget(l)
w.setLayout(vbox)
w.show()
def ff():
s = f()
if s: l.setText(s)
else: w.close()
w.connect(s, QtCore.SIGNAL('timersignal'), ff)
w.exec_()
w.destroy()
def send(self, address, amount, parent_window):
"""Send bitcoins to the target address."""
dest_address = self.fetch_destination(address)
if dest_address is None or not is_valid(dest_address):
QMessageBox.warning(parent_window, _('Error'),
_('Invalid Myriadcoin Address') + ':\n' + address, _('OK'))
return False
        amount = D(unicode(amount)) * (10**self.g.decimal_point)
if self.g.wallet.use_encryption:
password_dialog = PasswordDialog(parent_window)
password = password_dialog.run()
if not password:
return
else:
password = None
        fee = 0
        # Flat fee of 0.001 coins for amounts under 0.1 coins (10000000 base units).
        if amount < bitcoin(1) / 10:
            fee = bitcoin(1) / 1000
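        # Worked example (illustrative): sending 0.05 coins gives amount
        # 5000000 < 10000000, so the fee is 100000 base units (0.001 coins).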
try:
tx = self.g.wallet.mktx([(dest_address, amount)], password, fee)
except Exception as error:
QMessageBox.warning(parent_window, _('Error'), str(error), _('OK'))
return False
if tx.is_complete():
h = self.g.wallet.send_tx(tx)
self.waiting_dialog(lambda: False if self.g.wallet.tx_event.isSet() else _("Sending transaction, please wait..."))
status, message = self.g.wallet.receive_tx(h, tx)
if not status:
import tempfile
dumpf = tempfile.NamedTemporaryFile(delete=False)
dumpf.write(tx)
dumpf.close()
print "Dumped error tx to", dumpf.name
QMessageBox.warning(parent_window, _('Error'), message, _('OK'))
return False
TransactionWindow(message, self)
else:
filename = 'unsigned_tx_%s' % (time.mktime(time.gmtime()))
try:
fileName = QFileDialog.getSaveFileName(QWidget(), _("Select a transaction filename"), os.path.expanduser('~/%s' % (filename)))
with open(fileName,'w') as f:
f.write(json.dumps(tx.as_dict(),indent=4) + '\n')
QMessageBox.information(QWidget(), _('Unsigned transaction created'), _("Unsigned transaction was saved to file:") + " " +fileName, _('OK'))
except Exception as e:
QMessageBox.warning(QWidget(), _('Error'), _('Could not write transaction to file: %s' % e), _('OK'))
return True
def fetch_destination(self, address):
recipient = unicode(address).strip()
# alias
match1 = re.match("^(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+)$",
recipient)
# label or alias, with address in brackets
match2 = re.match("(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>",
recipient)
if match1:
dest_address = \
self.g.wallet.get_alias(recipient, True,
self.show_message, self.question)
return dest_address
elif match2:
return match2.group(2)
else:
return recipient
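# A self-contained sketch (hypothetical inputs) showing which branch of
# fetch_destination's two patterns each style of recipient string takes.
def _demo_fetch_destination_patterns():
    samples = ["bob@example.com",
               "Rent <1BtaFUr3qVvAmwrsuDuu5zk6e4s2rxd2Gy>",
               "1BtaFUr3qVvAmwrsuDuu5zk6e4s2rxd2Gy"]
    alias_re = "^(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+)$"
    bracket_re = "(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>"
    for s in samples:
        if re.match(alias_re, s):
            print "alias lookup:", s
        elif re.match(bracket_re, s):
            print "bracketed address:", re.match(bracket_re, s).group(2)
        else:
            print "raw address:", s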
class MiniDriver(QObject):
INITIALIZING = 0
CONNECTING = 1
SYNCHRONIZING = 2
READY = 3
    def __init__(self, main_window, mini_window):
        super(MiniDriver, self).__init__()
self.g = main_window
self.network = main_window.network
self.window = mini_window
if self.network:
self.network.register_callback('updated',self.update_callback)
self.network.register_callback('status', self.update_callback)
self.state = None
self.initializing()
self.connect(self, SIGNAL("updatesignal()"), self.update)
self.update_callback()
# This is a hack to workaround that Qt does not like changing the
# window properties from this other thread before the runloop has
# been called from.
def update_callback(self):
self.emit(SIGNAL("updatesignal()"))
def update(self):
if not self.network:
self.initializing()
#elif not self.network.interface:
# self.initializing()
elif not self.network.is_connected():
self.connecting()
        elif self.g.wallet is None:
self.ready()
elif not self.g.wallet.up_to_date:
self.synchronizing()
else:
self.ready()
self.update_balance()
self.update_completions()
self.update_history()
self.window.receiving.update_list()
def initializing(self):
if self.state == self.INITIALIZING:
return
self.state = self.INITIALIZING
self.window.deactivate()
def connecting(self):
if self.state == self.CONNECTING:
return
self.state = self.CONNECTING
self.window.deactivate()
def synchronizing(self):
if self.state == self.SYNCHRONIZING:
return
self.state = self.SYNCHRONIZING
self.window.deactivate()
def ready(self):
if self.state == self.READY:
return
self.state = self.READY
self.window.activate()
def update_balance(self):
conf_balance, unconf_balance = self.g.wallet.get_balance()
balance = D(conf_balance + unconf_balance)
self.window.set_balances(balance)
def update_completions(self):
completions = []
for addr, label in self.g.wallet.labels.items():
if addr in self.g.wallet.addressbook:
completions.append("%s <%s>" % (label, addr))
self.window.update_completions(completions)
def update_history(self):
tx_history = self.g.wallet.get_tx_history()
self.window.update_history(tx_history)
if __name__ == "__main__":
app = QApplication(sys.argv)
with open(rsrc("style.css")) as style_file:
app.setStyleSheet(style_file.read())
    # Note: MiniWindow's __init__ expects (actuator, expand_callback, config);
    # this bare call is only a placeholder entry point and will not run as-is.
    mini = MiniWindow()
sys.exit(app.exec_())
|
wozz/electrum-myr
|
gui/qt/lite_window.py
|
Python
|
gpl-3.0
| 31,102
|
import numpy as np
import matplotlib.pyplot as plt
identity = lambda x: x
class DenoisingAutoencoder(object):
"""
Denoising autoencoder.
"""
    def sigmoid(self, x):
        # Standard logistic; a piecewise form (np.exp(x)/(1+np.exp(x)) for
        # x < 0) would avoid overflow warnings for large negative inputs.
        return 1./(1+np.exp(-x))
    def sigmoid_deriv(self, x):
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
        res = self.sigmoid(x)
        return res*(1-res)
    def ac_func(self, x, function_name='SIGMOID'):
        # Dispatch to the chosen activation function
        fname_upper = function_name.upper()
        if fname_upper == 'SIGMOID':
            return self.sigmoid(x)
        else:
            raise NotImplementedError(fname_upper + " not implemented yet")
    def ac_func_deriv(self, x, function_name='SIGMOID'):
        # Dispatch to the derivative of the chosen activation function
        fname_upper = function_name.upper()
        if fname_upper == 'SIGMOID':
            return self.sigmoid_deriv(x)
        else:
            raise NotImplementedError(fname_upper + " not implemented yet")
def __init__(self, layer_units, weights=None):
self.weights = weights
self.layer_units = layer_units
    def init_weights(self, seed=0):
        """
        Initialize weights.
        layer_units: tuple stores the size of each layer.
        weights: structured weights.
        """
# Note layer_units[2] = layer_units[0]
layer_units = self.layer_units
n_layers = len(layer_units)
assert n_layers == 3
np.random.seed(seed)
        # Glorot/Xavier-style initialization: weights drawn uniformly from
        # [-r, r) with r determined by the sizes of the adjacent layers.
        r = np.sqrt(6) / np.sqrt(layer_units[1] + layer_units[0])
weights = [{} for i in range(n_layers - 1)]
weights[0]['W'] = np.random.random((layer_units[0], layer_units[1])) * 2.0 * r - r
weights[1]['W'] = np.random.random((layer_units[1], layer_units[2])) * 2.0 * r - r
weights[0]['b'] = np.zeros(layer_units[1])
weights[1]['b'] = np.zeros(layer_units[2])
self.weights = weights
return self.weights
def predict(self, X_noisy, reg=3e-3, activation_function='sigmoid'):
weights = self.weights
# Weight parameters
W0 = weights[0]['W']
b0 = weights[0]['b']
W1 = weights[1]['W']
b1 = weights[1]['b']
# TODO: Implement forward pass here. It should be the same forward pass that you implemented in the loss function
X_hp = X_noisy.dot(W0)+b0
X_h = self.sigmoid(X_hp)
Xhh = X_h.dot(W1)+b1
scores = self.sigmoid(Xhh)
return scores
def loss(self, X_noisy, X, reg=3e-3, activation_function='sigmoid'):
weights = self.weights
# Weighting parameters
W0 = weights[0]['W']
b0 = weights[0]['b']
W1 = weights[1]['W']
b1 = weights[1]['b']
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, N). #
#############################################################################
X_hp = X_noisy.dot(W0)+b0
X_h = self.sigmoid(X_hp)
Xhh = X_h.dot(W1)+b1
scores = self.sigmoid(Xhh)
#############################################################################
# END OF YOUR CODE #
#############################################################################
#############################################################################
# TODO: Compute the loss. This should include #
# (i) the data loss (square error loss), #
# (ii) L2 regularization for W1 and W2, and #
# Store the result in the variable loss, which should be a scalar. #
# (Don't forget to investigate the effect of L2 loss) #
#############################################################################
scores-=X
loss = .5*np.sum(np.square(scores), axis=1).mean() + .5*reg*(np.sum(W0**2)+np.sum(W1**2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
grads = [{},{}]
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
sgd = self.sigmoid_deriv#np.vectorize(self.sigmoid_deriv)
scores = scores*sgd(Xhh)
grads[1]['W'] = X_h.T.dot(scores)/X_noisy.shape[0] + reg*W1
grads[1]['b'] = np.sum(scores, axis=0)/X_noisy.shape[0]
scores = scores.dot(W1.T)*sgd(X_hp)
grads[0]['W'] = X_noisy.T.dot(scores)/X_noisy.shape[0] + reg*W0
grads[0]['b'] = np.sum(scores, axis=0)/X_noisy.shape[0]
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train_with_SGD(self, X, noise=identity,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=3e-3, num_iters=100,
batchsize=128, momentum='classic', mu=0.9, verbose=False,
activation_function='sigmoid'):
num_train = X.shape[0]
loss_history = []
layer_units = self.layer_units
n_layers = len(layer_units)
velocity = [{} for i in range(n_layers - 1)]
velocity[0]['W'] = np.zeros((layer_units[0], layer_units[1]))
velocity[1]['W'] = np.zeros((layer_units[1], layer_units[2]))
velocity[0]['b'] = np.zeros(layer_units[1])
velocity[1]['b'] = np.zeros(layer_units[2])
for it in xrange(num_iters):
batch_indicies = np.random.choice(num_train, batchsize, replace = False)
X_batch = X[batch_indicies]
# Compute loss and gradients
noisy_X_batch = noise(X_batch)
loss, grads = self.loss(noisy_X_batch, X_batch, reg, activation_function=activation_function)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using gradient descent. #
#########################################################################
# You can start and test your implementation without momentum. After
# making sure that it works, you can add momentum
for i in range(len(grads)):
for x in grads[i]:
velocity[i][x] = mu*velocity[i][x] - learning_rate*grads[i][x]
self.weights[i][x] += velocity[i][x]
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 10 == 0:
print 'SGD: iteration %d / %d: loss %f' % (it, num_iters, loss)
# Every 5 iterations.
if it % 5 == 0:
# Decay learning rate
learning_rate *= learning_rate_decay
return { 'loss_history': loss_history, }
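# A minimal usage sketch on synthetic data (hypothetical sizes and noise; not
# part of the assignment code): train a small autoencoder with masking noise,
# then compare one analytic gradient entry against a finite-difference
# estimate as a sanity check.
if __name__ == "__main__":
    def masking_noise(X, p=0.3):
        # Zero out a random fraction p of the inputs.
        return X * (np.random.random(X.shape) >= p)

    X = np.random.random((256, 20))
    dae = DenoisingAutoencoder((20, 8, 20))
    dae.init_weights(seed=0)
    stats = dae.train_with_SGD(X, noise=masking_noise, num_iters=50,
                               batchsize=64)
    print 'final loss %f' % stats['loss_history'][-1]

    # Finite-difference check of one analytic gradient entry.
    eps = 1e-5
    Xn = masking_noise(X[:8])
    loss0, grads = dae.loss(Xn, X[:8])
    dae.weights[0]['W'][0, 0] += eps
    loss1, _ = dae.loss(Xn, X[:8])
    dae.weights[0]['W'][0, 0] -= eps
    print 'analytic %e vs numeric %e' % (grads[0]['W'][0, 0],
                                         (loss1 - loss0) / eps)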
|
kadircet/CENG
|
783/HW2/METU/denoising_autoencoder.py
|
Python
|
gpl-3.0
| 8,230
|
threads_init = lambda *a, **k: None
class DBusConnection(object):
pass
class DBusConnectionFlags(object):
AUTHENTICATION_CLIENT = 1
pass
|
timvideos/gst-switch
|
docs/fake-lib/gi/repository/Gio.py
|
Python
|
gpl-3.0
| 147
|
"""
Script will generate mock comparisons and output ranking/scoring data after every round of comparisons
uses Scoring and Pairing algorithms directly without using the full ComPAIR backend
Outputs:
- for every algorithms: file named 'out'+algorithm name+'.csv' in the scripts folder
- for every round: display a row with the current rankings, the current scores, and placement stats
"""
import os
import random
import math
import unicodecsv as csv
from scipy.stats import spearmanr, pearsonr, kendalltau
import numpy
import multiprocessing
from enum import Enum
from compair.algorithms import ComparisonPair, ScoredObject, ComparisonWinner
from compair.algorithms.score import calculate_score_1vs1
from compair.algorithms.pair import generate_pair
from compair.models import PairingAlgorithm, ScoringAlgorithm
CURRENT_FOLDER = os.getcwd() + '/scripts'
REPETITIONS = 100
NUMBER_OF_STUDENTS = 100
NUMBER_OF_ANSWERS = 100
NUMBER_OF_COMPARISONS_PER_STUDENT = 10
NUMBER_OF_ROUNDS = NUMBER_OF_COMPARISONS_PER_STUDENT * 2
ROUND_LENGTH = NUMBER_OF_ANSWERS // 2  # integer division: used as a range() bound
CONCURRENCY = 8
class WinnerSelector(Enum):
always_correct = "always_correct"
correct_with_error = "correct_with_error"
guessing = "guessing"
closely_matched_errors = "closely_matched_errors"
pairing_packages = [
#PairingAlgorithm.adaptive.value,
PairingAlgorithm.adaptive_min_delta.value,
#PairingAlgorithm.random.value
]
scoring_packages = [
#ScoringAlgorithm.comparative_judgement.value,
ScoringAlgorithm.elo.value,
ScoringAlgorithm.true_skill.value
]
winner_selectors = [
(WinnerSelector.always_correct, 1.0, "100% Correct"),
(WinnerSelector.closely_matched_errors, 0.05, "Sigma 0.05"),
(WinnerSelector.closely_matched_errors, 0.06, "Sigma 0.06"),
(WinnerSelector.closely_matched_errors, 0.07, "Sigma 0.07"),
(WinnerSelector.closely_matched_errors, 0.08, "Sigma 0.08"),
(WinnerSelector.closely_matched_errors, 0.09, "Sigma 0.09"),
(WinnerSelector.closely_matched_errors, 0.10, "Sigma 0.10"),
(WinnerSelector.correct_with_error, 0.9, "90% Correct"),
(WinnerSelector.correct_with_error, 0.8, "80% Correct"),
(WinnerSelector.correct_with_error, 0.7, "70% Correct"),
(WinnerSelector.correct_with_error, 0.6, "60% Correct"),
]
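# Each tuple above is (WinnerSelector, parameter, label): the parameter acts
# as a correctness rate for correct_with_error and as a sigma for
# closely_matched_errors, and the label becomes part of the report file name.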
REPORT_FOLDER = "{}/report comparisons {} answers {} students {} repetitions {}".format(
CURRENT_FOLDER, NUMBER_OF_COMPARISONS_PER_STUDENT, NUMBER_OF_ANSWERS, NUMBER_OF_STUDENTS, REPETITIONS
)
if not os.path.exists(REPORT_FOLDER):
os.makedirs(REPORT_FOLDER)
def _decide_winner(winner_selector, correct_rate, key1_grade, key2_grade):
if winner_selector == WinnerSelector.always_correct:
return always_correct(key1_grade, key2_grade)
elif winner_selector == WinnerSelector.guessing:
return guessing()
elif winner_selector == WinnerSelector.correct_with_error:
return correct_with_error(key1_grade, key2_grade, correct_rate)
elif winner_selector == WinnerSelector.closely_matched_errors:
return closely_matched_errors(key1_grade, key2_grade, correct_rate)
else:
        raise Exception("Unknown winner selector: {}".format(winner_selector))
def always_correct(value1, value2):
return correct_with_error(value1, value2, 1.0)
def correct_with_error(value1, value2, correct_rate):
if value1 == value2:
return guessing()
correct_answer = ComparisonWinner.key1 if value1 > value2 else ComparisonWinner.key2
incorrect_answer = ComparisonWinner.key1 if value1 < value2 else ComparisonWinner.key2
return correct_answer if random.random() <= correct_rate else incorrect_answer
def guessing():
return ComparisonWinner.key1 if random.random() <= 0.5 else ComparisonWinner.key2
def closely_matched_errors(value1, value2, sigma):
# make the actual values of answers fuzzy (represents perceived value errors)
fuzzy_value1 = numpy.random.normal(value1, sigma, 1)[0]
fuzzy_value2 = numpy.random.normal(value2, sigma, 1)[0]
# return the correct winner using fuzzy perceived values
return always_correct(fuzzy_value1, fuzzy_value2)
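# A quick sanity check (illustrative; not used by the report run): with
# sigma=0.05, two answers only 0.02 apart should be mis-ordered fairly often.
def _demo_error_rate(trials=10000):
    wrong = sum(1 for _ in range(trials)
                if closely_matched_errors(0.80, 0.78, 0.05) == ComparisonWinner.key2)
    return wrong / float(trials)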
def _run_helper(args):
return _run(*args)
def _run(file_path, pairing_package_name, scoring_package_name, winner_selector, correct_rate):
random.seed()
numpy.random.seed()
actual_grades = numpy.random.normal(0.78, 0.1, NUMBER_OF_ANSWERS)
grade_by_answer_key = {}
answers = []
results = []
for key, grade in enumerate(actual_grades):
grade_by_answer_key[key+1] = grade
answers.append(ScoredObject(
key=key+1, score=0, variable1=None, variable2=None,
rounds=0, opponents=0, wins=0, loses=0
))
students = []
for key in range(NUMBER_OF_STUDENTS):
students.append({
'key': key,
'comparisons_left': NUMBER_OF_COMPARISONS_PER_STUDENT,
'comparisons_completed': []
})
comparisons = []
for round_count in range(1, NUMBER_OF_ROUNDS+1):
if len(students) == 0:
break
for comparison_in_round in range(ROUND_LENGTH):
if len(students) == 0:
break
student = random.choice(students)
student_comparisons = student['comparisons_completed']
comparison_pair = generate_pair(
package_name=pairing_package_name,
scored_objects=answers,
comparison_pairs=student_comparisons
)
key1 = comparison_pair.key1
key1_grade = grade_by_answer_key[key1]
key2 = comparison_pair.key2
key2_grade = grade_by_answer_key[key2]
winner = _decide_winner(
winner_selector, correct_rate,
key1_grade, key2_grade
)
comparison_pair = comparison_pair._replace(winner=winner)
comparisons.append(comparison_pair)
student['comparisons_completed'].append(comparison_pair)
student['comparisons_left'] -= 1
if student['comparisons_left'] <= 0:
indexes = [i for i, s in enumerate(students) if student['key'] == s['key']]
del students[indexes[0]]
index1 = next(index for index, answer in enumerate(answers) if answer.key == key1)
index2 = next(index for index, answer in enumerate(answers) if answer.key == key2)
            result1, result2 = calculate_score_1vs1(
                package_name=scoring_package_name,
                key1_scored_object=answers[index1],
                key2_scored_object=answers[index2],
                winner=winner,
                other_comparison_pairs=comparisons
            )
            answers[index1] = result1
            answers[index2] = result2
current_scores = [answer.score for answer in answers]
r_value, pearsonr_p_value = pearsonr(actual_grades, current_scores)
results.append(str(r_value))
#print("Round {} ----------- pearsonr={} value=={}".format(
# round_count, r_value, pearsonr_p_value
#))
with open(file_path, "a") as csvfile:
out = csv.writer(csvfile)
out.writerow(results)
job_args = []
for (winner_selector, correct_rate, correct_rate_str) in winner_selectors:
for pairing_package_name in pairing_packages:
for scoring_package_name in scoring_packages:
file_name = "{} {} {}.csv".format(
scoring_package_name, pairing_package_name, correct_rate_str
)
file_path = "{}/{}".format(REPORT_FOLDER, file_name)
with open(file_path, "w+") as csvfile:
out = csv.writer(csvfile)
out.writerow(["Round {}".format(index) for index in range(1, NUMBER_OF_ROUNDS+1)])
for _ in range(REPETITIONS):
args = (file_path, pairing_package_name, scoring_package_name, winner_selector, correct_rate)
job_args.append(args)
print("Starting {} jobs".format(len(job_args)))
pool = multiprocessing.Pool(processes=CONCURRENCY)
pool.map(_run_helper, job_args)
print("")
print("Finished!")
|
ubc/compair
|
scripts/generate_correlation_reports_direct.py
|
Python
|
gpl-3.0
| 8,082
|
#!/usr/bin/env python
# Copyright (C) 2015 Shea G Craig <shea.craig@sas.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Automate Phase Testing With Munki
Updates a list of pkgsinfo files with a force_install_after_date value
and unattended_install value of False.
"""
import argparse
import datetime
import os
import subprocess
import sys
from xml.parsers.expat import ExpatError
try:
import mount_shares_better
except ImportError:
mount_shares_better = None
try:
sys.path.append("/usr/local/munki/munkilib")
import FoundationPlist as plistlib
except ImportError:
import plistlib
# TODO: Get this from Munki preferences.
PKGINFO_EXTENSIONS = (".pkginfo", ".plist")
# TODO (Shea): this should be a preference.
TESTING_CATALOGS = {"development", "testing", "phase1", "phase2", "phase3"}
def main():
"""Build and parse args, and then kick-off action function."""
parser = build_argparser()
args = parser.parse_args()
args.repo = get_munki_repo(args)
args.func(args)
def build_argparser():
"""Create our argument parser."""
description = ("Set the force_install_after_date key and value for any "
"number of pkginfo files")
parser = argparse.ArgumentParser(description=description)
# Global arguments
parser.add_argument("-r", "--repo", help="Path to Munki repo. Will use "
"munkiimport's configured repo if not specified.")
parser.add_argument("-u", "--repo_url", help="Full mount URL to Munki "
"repo. Will attempt to mount if the share is missing.")
subparser = parser.add_subparsers(help="Sub-command help")
# Collect arguments
phelp = ("Collect available updates and generate markdown listing. "
"Product names that begin with 'Placeholder' (case-insensitive) "
"will be ignored.")
collect_parser = subparser.add_parser("collect", help=phelp)
collect_parser.set_defaults(func=collect)
phelp = "Path to save output files."
collect_parser.add_argument("output_path", help=phelp)
# Prepare arguments
phelp = ("Set the force_install_after_date and unattended_install value "
"for any number of pkginfo files to be phase tested.")
prepare_parser = subparser.add_parser("prepare", help=phelp)
phelp = (
"Date to use as the value for force_install_after_date. Format is: "
"'yyyy-mm-ddThh:mm:ssZ'. For example, August 3rd 2011 at 1PM is the "
"following: '2011-08-03T13:00:00Z'. OR, use a blank string (i.e. '') "
"to remove the force_install_after_date key/value pair.")
prepare_parser.add_argument("date", help=phelp)
phelp = "Catalog to set on pkginfo files."
prepare_parser.add_argument("phase", help=phelp)
phelp = ("Any number of paths to pkginfo files to update, or a path to a "
"file to use for input. Format should have one path per line, "
"with comments allowed.")
prepare_parser.add_argument("pkginfo", help=phelp, nargs="*")
prepare_parser.set_defaults(func=prepare)
# release subcommand
phelp = ("Set the force_install_after_date and unattended_install for any "
"number of pkginfo files to be released to production.")
release_parser = subparser.add_parser("release", help=phelp)
phelp = (
"Date to use as the value for force_install_after_date. Format is: "
"'yyyy-mm-ddThh:mm:ssZ'. For example, August 3rd 2011 at 1PM is the "
"following: '2011-08-03T13:00:00Z'. OR, use a blank string (i.e. '') "
"to remove the force_install_after_date key/value pair.")
release_parser.add_argument("date", help=phelp)
phelp = ("Any number of paths to pkginfo files to update, or a path to a "
"file to use for input. Format should have one path per line, "
"with comments allowed.")
release_parser.add_argument("pkginfo", help=phelp, nargs="*")
release_parser.set_defaults(func=release)
# bulk subcommand
phelp = "Set a top-level key on any number of pkginfo files."
bulk_parser = subparser.add_parser("bulk", help=phelp)
phelp = "Key to set."
bulk_parser.add_argument("key", help=phelp)
phelp = "Value to set on key, or '-' (literal hyphen) to remove the key."
bulk_parser.add_argument("val", help=phelp)
phelp = ("Any number of paths to pkginfo files to update, or a path to a "
"file to use for input. Format should have one path per line, "
"with comments allowed.")
bulk_parser.add_argument("pkginfo", help=phelp, nargs="*")
bulk_parser.set_defaults(func=bulk)
return parser
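# Example invocations (illustrative; dates, catalogs, and paths are hypothetical):
#   phasetool.py collect ~/Desktop
#   phasetool.py prepare '2011-08-03T13:00:00Z' phase1 Firefox-38.pkginfo
#   phasetool.py release '' phase_testing_files.txt
#   phasetool.py bulk category Browsers Firefox-38.pkginfo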
def get_munki_repo(args):
"""Use cli arg for repo, otherwise, get from munkiimport prefs."""
prefs = read_plist(
"~/Library/Preferences/com.googlecode.munki.munkiimport.plist")
repo = args.repo if args.repo else prefs.get("repo_path")
repo_url = args.repo_url if args.repo_url else prefs.get("repo_url")
if not is_mounted(repo):
repo = mount(repo_url)
return repo
def read_plist(path):
"""Read the plist at path."""
return plistlib.readPlist(os.path.expanduser(path))
def is_mounted(path):
"""Return whether path is attached to the current filesystem."""
return os.path.exists(path)
def mount(path):
"""Mount the share specified by path."""
mount_location = None
if not is_mounted(path):
if hasattr(mount_shares_better, "mount_share"):
try:
mount_location = mount_shares_better.mount_share(path)
except mount_shares_better.MountException as error:
print error.message
else:
if os.uname() == "Darwin":
mount_base = "/Volumes"
else:
mount_base = "/mnt"
mount_point = os.path.join(mount_base, os.path.basename(path))
args = ["mount_afp", path, mount_point]
try:
subprocess.check_call(args)
            except subprocess.CalledProcessError as error:
                raise PhasetoolError(
                    "Unable to mount {} at {} with error '{}'.".format(
                        path, mount_point, error.message))
mount_location = mount_point
return mount_location
def collect(args):
"""Collect available updates."""
pkginfos = get_testing_pkginfos(args.repo)
output_path = os.path.expanduser(args.output_path)
prefix = os.path.join(output_path,
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
write_markdown(pkginfos, "{}-phase_testing.md".format(prefix))
write_path_list(pkginfos, "{}-phase_testing_files.txt".format(prefix))
def get_testing_pkginfos(repo):
"""Return all pkginfo files with testing catalogs."""
pkginfos = {}
pkginfo_dir = os.path.join(repo, "pkgsinfo")
for dirpath, _, filenames in os.walk(pkginfo_dir):
for pfile in [fname for fname in filenames if is_pkginfo(fname)]:
try:
path = os.path.join(dirpath, pfile)
pkginfo_file = read_plist(path)
except ExpatError:
continue
if (is_testing(pkginfo_file) and
not is_placeholder(pkginfo_file.get("name"))):
pkginfos[path] = pkginfo_file
return pkginfos
def is_testing(pkginfo):
    """Return whether a pkginfo file specifies any testing catalogs."""
    catalogs = pkginfo.get("catalogs", [])
    return any(catalog in TESTING_CATALOGS for catalog in catalogs)
def is_placeholder(record_name):
"""Return whether a name is considered a placeholder."""
return record_name.upper().startswith("PLACEHOLDER")
def is_pkginfo(candidate):
"""Return whether a filename is a pkginfo by extension."""
return os.path.splitext(candidate)[-1].lower() in PKGINFO_EXTENSIONS
def write_markdown(data, path):
"""Write markdown data string to path."""
# TODO: Add template stuff.
month = datetime.datetime.now().strftime("%B")
schedule = []
today = datetime.date.today()
phases = (("Phase 1", 0, 3),
("Phase 2", 6, 10),
("Phase 3", 13, 17),
("Production", 20, 25))
output = [u"## {} Phase Testing Updates\n".format(month)]
output.append("## Schedule")
output.append("| Phase | Available | Required |")
output.append("| ----- | --------- | -------- |")
for phase in phases:
start = today + datetime.timedelta(days=phase[1])
end = today + datetime.timedelta(days=phase[2])
output.append("| {} | {} | {} |".format(phase[0], start, end))
output.append("")
for _, item_val in sorted(data.items()):
output.append(u"- {} {}".format(
item_val.get("display_name") or item_val.get("name"),
item_val["version"]))
output_string = u"\n".join(output).encode("utf-8")
write_file(output_string, path)
def write_path_list(data, path):
"""Write pkginfo path data to path."""
output_string = u"\n".join(sorted(data.keys())).encode("utf-8")
write_file(output_string, path)
def write_file(output_string, path):
"""Write output_string to path."""
with open(path, "w") as markdown_file:
markdown_file.write(output_string)
def prepare(args):
    """Set keys relevant to phase deployment."""
    if (len(args.pkginfo) == 1 and
            not args.pkginfo[0].endswith((".plist", ".pkginfo"))):
# File input
paths_to_change = get_pkginfo_from_file(args.pkginfo[0])
else:
paths_to_change = args.pkginfo
if not args.date:
date = None
elif not is_valid_date(args.date):
print "Invalid date! Please check formatting."
sys.exit(1)
else:
date = get_datetime(args.date)
for path in paths_to_change:
if os.path.exists(path):
pkginfo = read_plist(path)
set_force_install_after_date(date, pkginfo)
set_unattended_install(False, pkginfo)
set_catalog(args.phase, pkginfo)
plistlib.writePlist(pkginfo, path)
def release(args):
    """Set keys relevant to production deployment."""
    if (len(args.pkginfo) == 1 and
            not args.pkginfo[0].endswith((".plist", ".pkginfo"))):
# File input
paths_to_change = get_pkginfo_from_file(args.pkginfo[0])
else:
paths_to_change = args.pkginfo
if not args.date:
date = None
elif not is_valid_date(args.date):
print "Invalid date! Please check formatting."
sys.exit(1)
else:
date = get_datetime(args.date)
for path in paths_to_change:
if os.path.exists(path):
pkginfo = read_plist(path)
set_force_install_after_date(date, pkginfo)
set_unattended_install(True, pkginfo)
set_catalog("production", pkginfo)
plistlib.writePlist(pkginfo, path)
def bulk(args):
"""Set a key on multiple pkginfo files."""
    if (len(args.pkginfo) == 1 and
            not args.pkginfo[0].endswith((".plist", ".pkginfo"))):
# File input
paths_to_change = get_pkginfo_from_file(args.pkginfo[0])
else:
paths_to_change = args.pkginfo
for path in paths_to_change:
if os.path.exists(path):
pkginfo = read_plist(path)
if args.val == "-":
remove_key(args.key, pkginfo)
else:
set_key(args.key, args.val, pkginfo)
plistlib.writePlist(pkginfo, path)
def get_pkginfo_from_file(path):
"""Convert file contents into a list of paths, ignoring comments."""
with open(path) as paths:
paths_to_change = [
os.path.expanduser(path.strip("\n\t\"'"))
for path in paths.readlines() if not path.startswith("#")]
return paths_to_change
def get_datetime(date):
"""Return a datetime object for correctly formatted string date."""
return datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
def is_valid_date(date):
    """Ensure date is in the correct format.
    Format is: 'yyyy-mm-ddThh:mm:ssZ'. For example, August 3rd 2011 at
    1PM is: '2011-08-03T13:00:00Z'.
    Args:
        date (string): Date string to validate.
    Returns:
        Boolean.
    """
result = False
try:
_ = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
result = True
except ValueError:
pass
return result
def set_force_install_after_date(date, pkginfo):
    """Set the force_install_after_date value for a pkginfo file.
    Args:
        date (datetime.datetime): Date to force install after.
        pkginfo (plist): The pkginfo plist object to change.
    """
if date:
pkginfo["force_install_after_date"] = date
elif pkginfo.get("force_install_after_date"):
del pkginfo["force_install_after_date"]
def set_unattended_install(val, pkginfo):
    """Set the unattended_install value for a pkginfo file.
    Args:
        val (bool): Value to set.
        pkginfo (plist): The pkginfo plist object to change.
    """
set_key("unattended_install", val, pkginfo)
def set_catalog(val, pkginfo):
    """Set the catalog value to val, clearing other entries.
    Args:
        val (string): Catalog to set.
        pkginfo (plist): The pkginfo plist object to change.
    """
    catalogs = [val] if val else []
    set_key("catalogs", catalogs, pkginfo)
def set_key(key, val, pkginfo):
"""Set pkginfo's key to val.
Args:
key (string): The key name.
val (string, bool, datetime, int, list, dict): Value to set.
List and dict values may include any combination of other
valid types from this list.
pkginfo (plist): The pkginfo plist object to change.
"""
pkginfo[key] = val
def remove_key(key, pkginfo):
"""Remove a key/value pair from a pkginfo file.
Args:
key (string): Key to remove.
pkginfo (plist): The pkginfo plist object to change.
"""
if key in pkginfo:
del pkginfo[key]
# TODO: This could be restricted to only when files are changed
# TODO: Output when something is changed/ not changed.
if __name__ == "__main__":
main()
|
sheagcraig/phasetool
|
phasetool.py
|
Python
|
gpl-3.0
| 14,855
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PyraclaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
aarkwright/pyracla
|
pyracla/items.py
|
Python
|
gpl-3.0
| 286
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Generates the tr file based on the defined PyQt Project File
"""
import os, sys
import subprocess
PYTHONPATH = os.path.split(sys.executable)[0]
PLYPATH = os.path.join(PYTHONPATH, "Lib\\site-packages\\PyQt4\\pylupdate4.exe")
LREPATH = os.path.join(PYTHONPATH, "Lib\\site-packages\\PyQt4\\lrelease.exe")
FILEPATH = os.path.realpath(os.path.dirname(sys.argv[0]))
FILES = ("..\\core\\arcgeo.py",
"..\\core\\project.py",
"..\\core\\shape.py",
"..\\dxfimport\\geoent_arc.py",
"..\\dxfimport\\geoent_circle.py",
"..\\dxfimport\\geoent_line.py",
"..\\dxfimport\\importer.py",
"..\\globals\\config.py",
"..\\gui\\canvas.py",
"..\\gui\\canvas2d.py",
"..\\gui\\canvas3d.py",
"..\\gui\\messagebox.py",
"..\\gui\\popupdialog.py",
"..\\gui\\treehandling.py",
"..\\postpro\\postprocessor.py",
"..\\postpro\\postprocessorconfig.py",
"..\\postpro\\tspoptimisation.py",
"..\\dxf2gcode.py",
"..\\dxf2gcode.ui"
)
TSFILES = ("dxf2gcode_de_DE.ts",
"dxf2gcode_fr.ts",
"dxf2gcode_ru.ts")
FILESSTR = ""
for FILE in FILES:
FILESSTR += ("%s\\i18n\\%s " % (FILEPATH, FILE))
TSFILESTR = ""
for TSFILE in TSFILES:
TSFILESTR += ("%s\\i18n\\%s " % (FILEPATH, TSFILE))
OPTIONS = "-ts"
cmd1 = ("%s %s %s %s\n" % (PLYPATH, FILESSTR, OPTIONS, TSFILESTR))
print(cmd1)
print(subprocess.call(cmd1, shell=True))
cmd2 = ("%s %s\n" % (LREPATH, TSFILESTR))
print(cmd2)
print(subprocess.call(cmd2, shell=True))
print("\nREADY")
|
Poofjunior/dxf2gcode
|
make_tr.py
|
Python
|
gpl-3.0
| 1,707
|
#!/usr/bin/python
"""
06 Feb 2011
use slr to compute evolutionary rates
"""
__author__ = "Francois-Jose Serra"
__email__ = "francois@barrabin.org"
__licence__ = "GPLv3"
__version__ = "0.0"
from ete_dev import EvolTree
tree = EvolTree ("data/S_example/measuring_S_tree.nw")
tree.link_to_alignment ("data/S_example/alignment_S_measuring_evol.fasta")
tree.run_model ('SLR')
slr = tree.get_evol_model ('SLR')
slr.set_histface (up=False, kind='curve',errors=True,
hlines = [1.0,0.3], hlines_col=['black','grey'])
tree.show (histfaces=['SLR'])
|
khughitt/ete
|
examples/evol/7_slr.py
|
Python
|
gpl-3.0
| 574
|
#!/usr/bin/python
import toasim
import sys,random
nreal = 1
header = toasim.header()
header.parfile_name=sys.argv[1]
header.timfile_name=sys.argv[2]
par=open(sys.argv[1])
tim=open(sys.argv[2])
header.orig_parfile=par.read()
header.idealised_toas=tim.read()
reals=list()
i = 1  # argv[0] is the script name; start scanning at the first argument
while i < len(sys.argv):
if sys.argv[i] == "--nreal":
i+=1
nreal=int(sys.argv[i])
continue
i+=1
r=0
while r < nreal:
offsets=list()
ntoa=0
for line in header.idealised_toas.split("\n"):
if line.startswith(" "):
elems=line.strip().split()
error=float(elems[3])*1e-6
toa=elems[2]
offsets.append(random.lognormvariate(-6,3)*error) # *random.uniform(-1,1))
ntoa+=1
r+=1
print("\b\b\b\b\b\b\b\b", end=' ')
print("%d"%r, end=' ')
reals.append(toasim.correction(header,offsets,0,0,0,""))
header.ntoa=ntoa
header.nrealisations=nreal
print("\nWriting....")
header.orig_parfile=""
header.idealised_toas=""
file=open(header.timfile_name+".addRFI","w")
header.write(file)
for real in reals:
real.write(file)
file.close()
|
mattpitkin/tempo2
|
python/toasim/bin/fake_rfi.py
|
Python
|
gpl-3.0
| 1,132
|
# Copyright (C) 2017 Custodia Project Contributors - see LICENSE file
from __future__ import absolute_import
import json
from .base import CustodiaServerRunner
class TestKey(CustodiaServerRunner):
def test_store_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
def test_store_key_again(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 409
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
def test_store_key_forbidden_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers={})
assert resp.status_code == 403
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 404
def test_store_key_not_valid_container(self, custodia_server):
bucket_number = self.get_unique_number()
container = 'secrets/bucket{}/'.format(bucket_number)
invalid_container = 'secrets/invalid_bucket{}/'.format(bucket_number)
        mykey_with_invalid_bucket = '{}mykey'.format(invalid_container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
        resp = custodia_server.put(mykey_with_invalid_bucket,
                                   json={"type": "simple",
                                         "value": 'P@ssw0rd'},
                                   headers=self.request_headers)
assert resp.status_code == 404
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
def test_store_key_directory_instead_of_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey_dir = '{}mykey/'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey_dir, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 405
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
def test_get_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
    # TODO: there also needs to be an application/octet-stream version
def test_get_key_empty_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 404
# TODO: application/octet-stream variant needed here too (see sketch above)
def test_get_key_forbidden_access(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers={})
assert resp.status_code == 403
# TODO: application/octet-stream variant needed here too (see sketch above)
def test_delete_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
resp = custodia_server.delete(mykey, headers=self.request_headers)
assert resp.status_code == 204
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 404
def test_delete_key_empty_key(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
assert resp.text == '[]'
resp = custodia_server.delete(mykey, headers=self.request_headers)
assert resp.status_code == 404
def test_delete_forbidden_access(self, custodia_server):
container = 'secrets/bucket{}/'.format(self.get_unique_number())
mykey = '{}mykey'.format(container)
resp = custodia_server.post(container, headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.put(mykey, json={"type": "simple",
"value": 'P@ssw0rd'},
headers=self.request_headers)
assert resp.status_code == 201
resp = custodia_server.get(container, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'mykey' in data
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
resp = custodia_server.delete(mykey, headers={})
assert resp.status_code == 403
resp = custodia_server.get(mykey, headers=self.request_headers)
assert resp.status_code == 200
data = json.loads(resp.text)
assert 'type' in data
assert data['type'] == 'simple'
assert 'value' in data
assert data['value'] == 'P@ssw0rd'
|
latchset/custodia
|
tests/functional/test_key.py
|
Python
|
gpl-3.0
| 11,000
|
import PISM, time
import numpy as np
np.set_printoptions(precision=5, suppress=True)
ctx = PISM.Context()
ice_density = ctx.config.get_double("constants.ice.density")
ocean_density = ctx.config.get_double("constants.sea_water.density")
mu = ice_density / ocean_density
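# flotation criterion: with the flat bed at z = 0 used below, ice of
# thickness H is grounded where H > sea_level / mu and afloat otherwise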
def allocate_grid(ctx):
params = PISM.GridParameters(ctx.config)
params.Lx = 1e5
params.Ly = 1e5
params.Lz = 1000
params.Mx = 7
params.My = 7
params.Mz = 5
params.periodicity = PISM.NOT_PERIODIC
params.registration = PISM.CELL_CORNER
params.ownership_ranges_from_options(ctx.size)
return PISM.IceGrid(ctx.ctx, params)
def allocate_storage(grid):
ice_thickness = PISM.model.createIceThicknessVec(grid)
# not used, but needed by GeometryCalculator::compute()
surface = PISM.model.createIceSurfaceVec(grid)
bed_topography = PISM.model.createBedrockElevationVec(grid)
mask = PISM.model.createIceMaskVec(grid)
gl_mask = PISM.model.createGroundingLineMask(grid)
gl_mask_x = PISM.model.createGroundingLineMask(grid)
gl_mask_x.set_name("gl_mask_x")
gl_mask_y = PISM.model.createGroundingLineMask(grid)
gl_mask_y.set_name("gl_mask_y")
sea_level = PISM.model.createIceThicknessVec(grid)
sea_level.set_name("sea_level")
return ice_thickness, bed_topography, surface, mask, gl_mask, gl_mask_x, gl_mask_y, sea_level
def compute_mask(sea_level, bed_topography, ice_thickness, mask, surface):
gc = PISM.GeometryCalculator(ctx.config)
gc.compute(sea_level, bed_topography, ice_thickness, mask, surface)
def print_vec(vec):
v0 = vec.allocate_proc0_copy()
vec.put_on_proc0(v0.get())
shape = vec.get_dm().get().sizes
print(vec.get_name())
print(v0.get()[:].reshape(shape, order="f"))
def init(mu, L, sea_level, vec, type="box"):
k = {0.0 : 8,
0.25 : 7,
0.5 : 6,
0.75 : 5,
1.0 : 4}
H0 = (8.0 / k[L]) * (sea_level / mu)
H1 = 0.5 * H0
grid = vec.get_grid()
with PISM.vec.Access(nocomm=[vec]):
for (i, j) in grid.points():
if type == "box" and abs(i - 3) < 2 and abs(j - 3) < 2:
vec[i, j] = H0
elif type == "cross" and abs(i - 3) < 2 and abs(j - 3) < 2 and (i == 3 or j == 3):
vec[i, j] = H0
else:
vec[i, j] = H1
if abs(i - 3) >= 3 or abs(j - 3) >= 3:
vec[i, j] = 0.0
vec.update_ghosts()
def grounded_cell_fraction_test():
# allocation
grid = allocate_grid(ctx)
ice_thickness, bed_topography, surface, mask, gl_mask, gl_mask_x, gl_mask_y, _ = allocate_storage(grid)
bed_topography.set(0.0)
# initialization
sea_level = 500.0
for L in [0.0, 0.25, 0.5, 0.75, 1.0]:
init(mu, L, sea_level, ice_thickness, "box")
compute_mask(sea_level, bed_topography, ice_thickness, mask, surface)
# computation of gl_mask
PISM.compute_grounded_cell_fraction(ice_density, ocean_density, sea_level,
ice_thickness, bed_topography, mask, gl_mask,
gl_mask_x, gl_mask_y)
# inspection / comparison
print "L = %f" % L
print_vec(mask)
print_vec(gl_mask_x)
print_vec(gl_mask_y)
print_vec(gl_mask)
def new_grounded_cell_fraction_test():
# allocation
grid = allocate_grid(ctx)
ice_thickness, bed_topography, _, _, gl_mask, _, _, sea_level = allocate_storage(grid)
# initialization
bed_topography.set(0.0)
sl = 500.0
sea_level.set(sl)
for L in [0.0, 0.25, 0.5, 0.75, 1.0]:
init(mu, L, sl, ice_thickness, "box")
# computation of gl_mask
PISM.compute_grounded_cell_fraction(ice_density, ocean_density,
sea_level,
ice_thickness,
bed_topography,
gl_mask)
# inspection / comparison
print "L = %f" % L
print_vec(gl_mask)
grounded_cell_fraction_test()
new_grounded_cell_fraction_test()
|
talbrecht/pism_pik
|
test/grounded_cell_fraction/grounded_cell_fraction.py
|
Python
|
gpl-3.0
| 4,170
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .views import module_bp
|
Emercoin/emcweb
|
engine/emcweb/emcweb_webapi/__init__.py
|
Python
|
gpl-3.0
| 94
|
print ("Hello, Python!");
|
ashoka-cs/people
|
setup.py
|
Python
|
gpl-3.0
| 25
|
from pysys.constants import *
from apama.correlator import CorrelatorHelper
from com.jtech.basetest import CycleMonitorTest
class PySysTest(CycleMonitorTest):
def execute(self):
#start server and set stations
self.startHTTPServer()
station = self.addStation(1,'Hyde Park',51512303,-159988)
station.update(2,20,'2015-06-24 12:10:00')
self.dumpStations(file='city-bikes.json')
#start the application (adapter polls on request only)
self.startCorrelator(url='http://localhost:%d/city-bikes.json'%self.httpPort)
self.startScenarioPrinter(self.correlator)
self.startADBCAdapter(self.correlator, insert='insert.sql')
self.initialiseApplication(self.correlator)
self.waitForSignal('jython.out', expr='ADDED', condition='==1', timeout=5)
#update the station and poll
station.update(12,10,'2015-06-24 12:11:00')
self.dumpStationsAndPoll(file='city-bikes.json')
self.waitForSignal('jython.out', expr='REMOVED', condition='==1', timeout=5)
def validate(self):
exprList=[]
exprList.append('ADDED: id=1, ratio=0.09, type=LOWER_BOUNDARY')
exprList.append('REMOVED: id=1, ratio=0.09, type=LOWER_BOUNDARY')
self.assertOrderedGrep('jython.out', exprList=exprList)
|
moraygrieve/cycle-monitor
|
test/testcases/CycleMonitor_cor_004/run.py
|
Python
|
gpl-3.0
| 1,201
|
"""
Classes for modelling 3D vector fields.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod, abstractproperty
from scipy import interpolate
from nvg.maths import vectors
from nvg.utilities.documentation import prepend_method_doc
#from imusim.maths.natural_neighbour import NaturalNeighbourInterpolatorC
import numpy as np
class VectorField(object):
"""
Base class for vector fields.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __call__(self, position, t):
"""
Evaluate the vector field at the given position(s) and time(s).
@param position: 3xN L{np.ndarray} of position co-ordinates, in m.
@param t: Scalar or length N L{np.ndarray} of times in s.
@return: 3xN L{np.ndarray} of field values.
"""
pass
@abstractproperty
def nominalValue(self):
"""
Nominal 3x1 vector value of the field, for use in calibrating sensors.
"""
pass
@property
def nominalMagnitude(self):
"""
Nominal magnitude of the field, for use in calibrating sensors.
"""
return vectors.norm(self.nominalValue)
class ConstantVectorField(VectorField):
"""
A vector field with a constant value everywhere.
"""
def __init__(self, value):
"""
Construct constant field model.
@param value: Field value as a 3x1 L{np.ndarray}.
"""
self._value = value
def __call__(self, position, t):
result = np.empty_like(position)
result[:] = self._value
return result
@property
def nominalValue(self):
return self._value
class InterpolatedVectorField(VectorField):
"""
A vector field interpolated from sampled values.
"""
@abstractmethod
def __init__(self, positions, values):
"""
Construct vector field interpolator.
@param positions: 3xN L{np.ndarray} of measurement positions.
@param values: 3xN L{np.ndarray} of corresponding field values.
"""
pass
class RBFInterpolatedField(InterpolatedVectorField):
"""
Field interpolation using radial basis functions.
Each component of the field is interpolated independently.
"""
def __init__(self, positions, values):
x,y,z = positions
self.components = [interpolate.Rbf(x,y,z,v,function='cubic')
for v in values]
def __call__(self, position, t):
length = np.shape(position)[-1]
# evaluate in blocks of roughly 500 samples; max() guards against
# zero blocks when fewer than 500 positions are given
nblocks = max(1, length // 500)
inblocks = np.array_split(position, nblocks, axis=1)
outblocks = [np.array([np.atleast_1d(c(*(list(ib))))
for c in self.components]) for ib in inblocks]
return np.hstack(outblocks)
#class NaturalNeighbourInterpolatedField(InterpolatedVectorField):
# """
# Natural Neighbour interpolation of vector fields.
#
# This is a Python wrapper for the C implementation by Ross Hemsley, described
# in the report "Interpolation on a Magnetic Field", Bristol University, 2009.
#
# The original code and report are available from:
# http://code.google.com/p/interpolate3d/
# """
# def __init__(self, positions, values):
#
# valid = vectors.validity(np.vstack((positions,values)))
#
# x,y,z = positions
#
# self._baseField = np.median(values[:,valid], axis=1).reshape(3,1)
# deviations = values - self._baseField
#
# u,v,w = values
#
# self.imp = NaturalNeighbourInterpolatorC(
# np.ascontiguousarray(x[valid]),
# np.ascontiguousarray(y[valid]),
# np.ascontiguousarray(z[valid]),
# np.ascontiguousarray(u[valid]),
# np.ascontiguousarray(v[valid]),
# np.ascontiguousarray(w[valid]))
#
# @property
# def nominalValue(self):
# return self._baseField
#
# def __call__(self, position, t):
# valid = vectors.validity(position)
# result = np.empty_like(position)
# result[:,~valid] = np.nan
# result[:,valid] = self.imp(*(list(position[:,valid])))
# return self._baseField + result
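# Minimal usage sketch (illustrative, not part of the original module):
# evaluate a constant field, e.g. standard gravity, at a few positions.
if __name__ == '__main__':
    gravity = ConstantVectorField(np.array([[0.0], [0.0], [-9.81]]))
    positions = np.zeros((3, 4))
    print(gravity(positions, 0.0))      # 3x4 array; every column is the field value
    print(gravity.nominalMagnitude)     # 9.81, assuming vectors.norm is Euclidean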
|
alfkjartan/nvgimu
|
nvg/maths/vector_fields.py
|
Python
|
gpl-3.0
| 4,802
|
import tkinter as tk
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
# print(self.widget.bbox("insert"))
# print(type(self.widget.bbox("insert")))
x, y, cx, cy = self.widget.bbox("insert")
# These steps seem to be necessary for older versions of Python (tkinter??)
# .split()
# x = int(x)
# y = int(y)
# cx = int(cx)
# cy = int(cy)
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + 27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
# Create a tooltip tool
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Leave>', leave)
widget.bind('<Enter>', enter)
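# Minimal usage sketch (not part of the original module): attach a tooltip
# to an Entry widget in a throwaway window.
if __name__ == '__main__':
    root = tk.Tk()
    entry = tk.Entry(root)
    entry.pack(padx=20, pady=20)
    createToolTip(entry, 'Type your grocery item here')
    root.mainloop()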
|
mariana-LJ/groceries_desk_app
|
Tooltip.py
|
Python
|
gpl-3.0
| 1,513
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import logging
import inspect
from functools import partial
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
from hyperspy.events import Event, Events
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.misc.test_utils import ignore_warning
_logger = logging.getLogger(__name__)
class Signal1DFigure(BlittedFigure):
"""
"""
def __init__(self, title=""):
super(Signal1DFigure, self).__init__()
self.figure = None
self.ax = None
self.right_ax = None
self.ax_lines = list()
self.right_ax_lines = list()
self.axes_manager = None
self.right_axes_manager = None
# Labels
self.xlabel = ''
self.ylabel = ''
self.title = title
self.create_figure()
self.create_axis()
# Color cycles
self._color_cycles = {
'line': utils.ColorCycle(),
'step': utils.ColorCycle(),
'scatter': utils.ColorCycle(), }
def create_axis(self):
self.ax = self.figure.add_subplot(111)
animated = self.figure.canvas.supports_blit
self.ax.yaxis.set_animated(animated)
self.ax.xaxis.set_animated(animated)
self.ax.hspy_fig = self
def create_right_axis(self, color='black', adjust_layout=True):
"""
Add an axis on the right hand side of the figure.
Parameters
----------
adjust_layout : bool, optional
Whether to call ``plt.tight_layout`` or not. The default is True.
Returns
-------
None.
"""
if self.ax is None:
self.create_axis()
if self.right_ax is None:
self.right_ax = self.ax.twinx()
self.right_ax.hspy_fig = self
self.right_ax.yaxis.set_animated(self.figure.canvas.supports_blit)
self.right_ax.tick_params(axis='y', labelcolor=color)
# Needs to set the zorder of the ax to get the mouse event for ax
# See https://github.com/matplotlib/matplotlib/issues/10009
self.ax.set_zorder(self.right_ax.get_zorder() + 1)
if adjust_layout:
plt.tight_layout()
def close_right_axis(self, adjust_layout=True):
"""
Remove the axis on the right hand side of the figure
Parameters
----------
adjust_layout : bool, optional
Whether to call ``plt.tight_layout`` or not. The default is True.
Returns
-------
None.
"""
if self.right_ax is not None:
for lines in self.right_ax_lines:
lines.close()
self.right_ax.remove()
self.right_ax = None
if adjust_layout:
plt.tight_layout()
def add_line(self, line, ax='left', connect_navigation=False):
"""
Add Signal1DLine to figure
Parameters
----------
line : Signal1DLine object
Line to be added to the figure.
ax : {'left', 'right'}, optional
Position of the y axis, either 'left' or 'right'. The default is 'left'.
connect_navigation : bool, optional
Connect the update of the line to the `indices_changed` event of
the axes_manager. This is only necessary when adding a line to the
left since the `indices_changed` event is already connected to
the `update` method of `Signal1DFigure`. The default is False.
Returns
-------
None.
"""
if ax == 'left':
line.ax = self.ax
if line.axes_manager is None:
line.axes_manager = self.axes_manager
self.ax_lines.append(line)
line.sf_lines = self.ax_lines
elif ax == 'right':
line.ax = self.right_ax
self.right_ax_lines.append(line)
line.sf_lines = self.right_ax_lines
if line.axes_manager is None:
line.axes_manager = self.right_axes_manager
if connect_navigation:
f = partial(line._auto_update_line, update_ylimits=True)
line.axes_manager.events.indices_changed.connect(f, [])
line.events.closed.connect(
lambda: line.axes_manager.events.indices_changed.disconnect(f),
[])
line.axis = self.axis
# Automatically assign the color if not defined
if line.color is None:
line.color = self._color_cycles[line.type]()
# Or remove it from the color cycle if part of the cycle
# in this round
else:
rgba_color = mpl.colors.colorConverter.to_rgba(line.color)
if rgba_color in self._color_cycles[line.type].color_cycle:
self._color_cycles[line.type].color_cycle.remove(
rgba_color)
def plot(self, data_function_kwargs={}, **kwargs):
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
self.ax.set_title(self.title)
x_axis_upper_lims = []
x_axis_lower_lims = []
for line in self.ax_lines:
line.plot(data_function_kwargs=data_function_kwargs, **kwargs)
x_axis_lower_lims.append(line.axis.axis[0])
x_axis_upper_lims.append(line.axis.axis[-1])
for marker in self.ax_markers:
marker.plot(render_figure=False)
plt.xlim(np.min(x_axis_lower_lims), np.max(x_axis_upper_lims))
self.axes_manager.events.indices_changed.connect(self.update, [])
self.events.closed.connect(
lambda: self.axes_manager.events.indices_changed.disconnect(
self.update), [])
self.ax.figure.canvas.draw_idle()
if hasattr(self.figure, 'tight_layout'):
try:
self.figure.tight_layout()
except BaseException:
# tight_layout is a bit brittle, we do this just in case it
# complains
pass
self.figure.canvas.draw()
def _on_close(self):
_logger.debug('Closing Signal1DFigure.')
if self.figure is None:
return # Already closed
for line in self.ax_lines + self.right_ax_lines:
line.close()
super(Signal1DFigure, self)._on_close()
_logger.debug('Signal1DFigure Closed.')
def update(self):
"""
Update lines, markers and render at the end.
This method is connected to the `indices_changed` event of the
`axes_manager`.
"""
def update_lines(ax, ax_lines):
y_min, y_max = np.nan, np.nan
for line in ax_lines:
# save on figure rendering and do it at the end
# don't update the y limits
line._auto_update_line(render_figure=False,
update_ylimits=False)
y_min = np.nanmin([y_min, line._y_min])
y_max = np.nanmax([y_max, line._y_max])
ax.set_ylim(y_min, y_max)
for marker in self.ax_markers:
marker.update()
# Left and right axes need to be updated separately to set the
# correct y limits of each one
update_lines(self.ax, self.ax_lines)
if self.right_ax is not None:
update_lines(self.right_ax, self.right_ax_lines)
self.render_figure()
class Signal1DLine(object):
"""Line that can be added to Signal1DFigure.
Attributes
----------
type : {'scatter', 'step', 'line'}
Select the line drawing style.
line_properties : dictionary
Accepts a dictionary of valid (i.e. recognized by mpl.plot)
containing valid line properties. In addition it understands
the keyword `type` that can take the following values:
{'scatter', 'step', 'line'}
auto_update: bool
If False, executing ``_auto_update_line`` does not update the
line plot.
Methods
-------
set_line_properties
Enables setting the line_properties attribute using keyword
arguments.
Raises
------
ValueError
If an invalid keyword value is passed to line_properties.
"""
def __init__(self):
self.events = Events()
self.events.closed = Event("""
Event that triggers when the line is closed.
Arguments:
obj: Signal1DLine instance
The instance that triggered the event.
""", arguments=["obj"])
self.sf_lines = None
self.ax = None
# Data attributes
self.data_function = None
# args to pass to `__call__`
self.data_function_kwargs = {}
self.axis = None
self.axes_manager = None
self._plot_imag = False
self.norm = 'linear'
# Properties
self.auto_update = True
self.autoscale = 'v'
self._y_min = np.nan
self._y_max = np.nan
self.line = None
self.plot_indices = False
self.text = None
self.text_position = (-0.1, 1.05,)
self._line_properties = {}
self.type = "line"
@property
def get_complex(self):
warnings.warn("The `get_complex` attribute is deprecated and will be"
"removed in 2.0, please use `_plot_imag` instead.",
VisibleDeprecationWarning)
return self._plot_imag
@property
def line_properties(self):
return self._line_properties
@line_properties.setter
def line_properties(self, kwargs):
if 'type' in kwargs:
self.type = kwargs['type']
del kwargs['type']
if 'color' in kwargs:
color = kwargs['color']
del kwargs['color']
self.color = color
for key, item in kwargs.items():
if item is None and key in self._line_properties:
del self._line_properties[key]
else:
self._line_properties[key] = item
if self.line is not None:
plt.setp(self.line, **self.line_properties)
self.ax.figure.canvas.draw_idle()
def set_line_properties(self, **kwargs):
self.line_properties = kwargs
@property
def type(self):
return self._type
@type.setter
def type(self, value):
lp = {}
if value == 'scatter':
lp['marker'] = 'o'
lp['linestyle'] = 'None'
lp['markersize'] = 1
elif value == 'line':
lp['linestyle'] = '-'
lp['marker'] = "None"
lp['drawstyle'] = "default"
elif value == 'step':
lp['drawstyle'] = 'steps-mid'
lp['marker'] = "None"
else:
raise ValueError(
"`type` must be one of "
"{\'scatter\', \'line\', \'step\'}"
"but %s was given" % value)
self._type = value
self.line_properties = lp
if self.color is not None:
self.color = self.color
@property
def color(self):
if 'color' in self.line_properties:
return self.line_properties['color']
elif 'markeredgecolor' in self.line_properties:
return self.line_properties['markeredgecolor']
else:
return None
@color.setter
def color(self, color):
if self._type == 'scatter':
self.set_line_properties(markeredgecolor=color)
if 'color' in self._line_properties:
del self._line_properties['color']
else:
if color is None and 'color' in self._line_properties:
del self._line_properties['color']
else:
self._line_properties['color'] = color
self.set_line_properties(markeredgecolor=None)
if self.line is not None:
plt.setp(self.line, **self.line_properties)
self.ax.figure.canvas.draw_idle()
def plot(self, data=1, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
data = self._get_data()
if self.line is not None:
self.line.remove()
norm = self.norm
if norm == 'log':
plot = self.ax.semilogy
elif (isinstance(norm, mpl.colors.Normalize) or
(inspect.isclass(norm) and issubclass(norm, mpl.colors.Normalize))
):
raise ValueError("Matplotlib Normalize instance or subclass can "
"be used for Signal2D only.")
elif norm not in ["auto", "linear"]:
raise ValueError("`norm` paramater should be 'auto', 'linear' or "
"'log' for Signal1D.")
else:
plot = self.ax.plot
self.line, = plot(self.axis.axis, data, **self.line_properties,
animated=self.ax.figure.canvas.supports_blit)
if not self.axes_manager or self.axes_manager.navigation_size == 0:
self.plot_indices = False
if self.plot_indices is True:
if self.text is not None:
self.text.remove()
self.text = self.ax.text(*self.text_position,
s=str(self.axes_manager.indices),
transform=self.ax.transAxes,
fontsize=12,
color=self.line.get_color(),
animated=self.ax.figure.canvas.supports_blit)
self._y_min, self._y_max = self.ax.get_ylim()
self.ax.hspy_fig.render_figure()
def _get_data(self, real_part=False):
if self._plot_imag and not real_part:
ydata = self.data_function(axes_manager=self.axes_manager,
**self.data_function_kwargs).imag
else:
ydata = self.data_function(axes_manager=self.axes_manager,
**self.data_function_kwargs).real
return ydata
def _auto_update_line(self, update_ylimits=False, **kwargs):
"""Updates the line plot only if `auto_update` is `True`.
This is useful to connect to events that automatically update the line.
"""
if self.auto_update:
if 'render_figure' not in kwargs.keys():
# if markers are plotted, we don't render the figure now but
# once the markers have been updated
kwargs['render_figure'] = (
len(self.ax.hspy_fig.ax_markers) == 0)
self.update(self, update_ylimits=update_ylimits, **kwargs)
def update(self, force_replot=False, render_figure=True,
update_ylimits=False):
"""Update the current spectrum figure
Parameters
----------
force_replot : bool
If True, close and open the figure. Default is False.
render_figure : bool
If True, render the figure. Useful to avoid firing matplotlib
drawing events too often. Default is True.
update_ylimits : bool
If True, update the y-limits. This is useful to avoid the figure
flickering when different lines update the y-limits consecutively,
in which case, this is done in `Signal1DFigure.update`.
Default is False.
"""
if force_replot is True:
self.close()
self.plot(data_function_kwargs=self.data_function_kwargs,
norm=self.norm)
self._y_min, self._y_max = self.ax.get_ylim()
ydata = self._get_data()
old_xaxis = self.line.get_xdata()
if len(old_xaxis) != self.axis.size or \
np.any(np.not_equal(old_xaxis, self.axis.axis)):
self.line.set_data(self.axis.axis, ydata)
else:
self.line.set_ydata(ydata)
if 'x' in self.autoscale:
self.ax.set_xlim(self.axis.axis[0], self.axis.axis[-1])
if 'v' in self.autoscale:
self.ax.relim()
y1, y2 = np.searchsorted(self.axis.axis,
self.ax.get_xbound())
y2 += 2
y1, y2 = np.clip((y1, y2), 0, len(ydata))
clipped_ydata = ydata[y1:y2]
with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_max, y_min = (np.nanmax(clipped_ydata),
np.nanmin(clipped_ydata))
if self._plot_imag:
# Add real plot
yreal = self._get_data(real_part=True)
clipped_yreal = yreal[y1:y2]
with ignore_warning(category=RuntimeWarning):
# In case of "All-NaN slices"
y_min = min(y_min, np.nanmin(clipped_yreal))
y_max = max(y_max, np.nanmin(clipped_yreal))
if y_min == y_max:
# To avoid matplotlib UserWarning when calling `set_ylim`
y_min, y_max = y_min - 0.1, y_max + 0.1
if not np.isfinite(y_min):
y_min = None # data are -inf or all NaN
if not np.isfinite(y_max):
y_max = None # data are inf or all NaN
if y_min is not None:
self._y_min = y_min
if y_max is not None:
self._y_max = y_max
if update_ylimits:
# Most of the time, we don't want to call `set_ylim` now to
# avoid flickering of the figure. However, we use the values
# `self._y_min` and `self._y_max` in `Signal1DFigure.update`
self.ax.set_ylim(self._y_min, self._y_max)
if self.plot_indices is True:
self.text.set_text(self.axes_manager.indices)
if render_figure:
self.ax.hspy_fig.render_figure()
def close(self):
_logger.debug('Closing `Signal1DLine`.')
if self.line in self.ax.lines:
self.ax.lines.remove(self.line)
if self.text and self.text in self.ax.texts:
self.ax.texts.remove(self.text)
if self.sf_lines and self in self.sf_lines:
self.sf_lines.remove(self)
self.events.closed.trigger(obj=self)
for f in self.events.closed.connected:
self.events.closed.disconnect(f)
try:
self.ax.figure.canvas.draw_idle()
except BaseException:
pass
_logger.debug('`Signal1DLine` closed.')
def _plot_component(factors, idx, ax=None, cal_axis=None,
comp_label='PC'):
if ax is None:
ax = plt.gca()
if cal_axis is not None:
x = cal_axis.axis
plt.xlabel(cal_axis.units)
else:
x = np.arange(factors.shape[0])
plt.xlabel('Channel index')
ax.plot(x, factors[:, idx], label='%s %i' % (comp_label, idx))
return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
comp_label='PC', no_nans=True, calibrate=True,
cmap=plt.cm.gray):
if ax is None:
ax = plt.gca()
if no_nans:
loadings = np.nan_to_num(loadings)
if axes_manager.navigation_dimension == 2:
extent = None
# get calibration from a passed axes_manager
shape = axes_manager._navigation_shape_in_array
if calibrate:
extent = (axes_manager._axes[0].low_value,
axes_manager._axes[0].high_value,
axes_manager._axes[1].high_value,
axes_manager._axes[1].low_value)
im = ax.imshow(loadings[idx].reshape(shape), cmap=cmap, extent=extent,
interpolation='nearest')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
elif axes_manager.navigation_dimension == 1:
if calibrate:
x = axes_manager._axes[0].axis
else:
x = np.arange(axes_manager._axes[0].size)
ax.step(x, loadings[idx])
else:
raise ValueError('View not supported')
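# Minimal usage sketch for the two private helpers above (illustrative,
# not part of HyperSpy's public API):
#
#     factors = np.random.random((200, 3))
#     ax = _plot_component(factors, 0)
#     _plot_component(factors, 1, ax=ax)
#     ax.legend()
#     plt.show()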
|
ericpre/hyperspy
|
hyperspy/drawing/signal1d.py
|
Python
|
gpl-3.0
| 21,140
|
# granulecell_incremental_writing.py ---
#
# Filename: granulecell_incremental_writing.py
# Description:
# Author: subha
# Maintainer:
# Created: Mon Nov 3 23:28:21 2014 (+0530)
# Version:
# Last-Updated: Tue Nov 4 10:59:41 2014 (+0530)
# By: Subhasis Ray
# Update #: 4
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This example shows incremental data-writing while running a
simulation.
It uses the Granule cell model in NeuroMLv1.8 by Aditya Gilra
(available as part of MOOSE demos in Demos/neuroml/GranuleCell). The
script there uses the neuroml reader developed by Aditya to simulate
the model in MOOSE.
"""
import os
import numpy as np
import sys
from datetime import datetime
sys.path.append('../../')
sys.path.append('/home/subha/src/moose_trunk/python')
sys.path.append('/home/subha/src/moose_trunk/Demos/neuroml/GranuleCell')
import nsdf
# You need to include the location of Granule98 and moose module in
# your path. These are: `moose/Demos/neuroml/GranuleCell` and
# `moose/python` respectively where moose is the directory where you
# have moose sources (built).
import moose
import Granule98 as granule
runtime = 0.7
def example():
directory = os.path.dirname(granule.__file__)
current = os.getcwd()
os.chdir(directory)
start_time = datetime.now()
granule.loadGran98NeuroML_L123(granule.filename)
soma_path = '/cells[0]/Gran_0[0]/Soma_0[0]'
ca = moose.element('{}/Gran_CaPool_98/data/somaCa'.format(soma_path))
vm = moose.element('{}/data[0]/somaVm[0]'.format(soma_path))
os.chdir(current)
writer = nsdf.NSDFWriter('granulecell_incremental.h5', mode='w', compression='gzip')
writer.add_model_filecontents([directory])
ca_data = nsdf.UniformData('Ca', unit='mM', dt=granule.plotdt, tunit='s')
ca_data.put_data(soma_path, ca.vector)
source_ds = writer.add_uniform_ds('GranuleSoma', [soma_path])
writer.add_uniform_data(source_ds, ca_data)
vm_data = nsdf.UniformData('Vm', unit='V', dt=granule.plotdt, tunit='s')
vm_data.put_data(soma_path, vm.vector)
writer.add_uniform_data(source_ds, vm_data)
writer.title = 'Sample NSDF file for olfactory bulb granule cell model'
writer.description = 'This file stores the entire model' \
' directory in `/model/filecontent`'
writer.tstart = start_time
writer.creator = [os.environ['USER']]
writer.contributor = ['Subhasis Ray', 'Aditya Gilra']
writer.license = 'CC BY-SA'
writer.software = ['Python2.7', 'moose', 'nsdf python library']
writer.method = ['exponential Euler']
clock = moose.Clock('/clock')
time_increment = 100 * granule.plotdt
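# incremental writing: advance the simulation in chunks of time_increment
# and append each chunk's recorded Vm and Ca data to the NSDF file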
while clock.currentTime < runtime:
print('Run till %s' % clock.currentTime)
vm.clearVec()
ca.clearVec()
to_run = time_increment
if clock.currentTime + time_increment > runtime:
to_run = runtime - clock.currentTime
moose.start(to_run)
vm_data.put_data(soma_path, vm.vector)
writer.add_uniform_data(source_ds, vm_data)
ca_data.put_data(soma_path, ca.vector)
writer.add_uniform_data(source_ds, ca_data)
end_time = datetime.now()
writer.tend = end_time
print('Finished writing example NSDF file for GranuleCell demo')
if __name__ == '__main__':
example()
#
# granulecell_incremental_writing.py ends here
|
nsdf/nsdf
|
examples/moose_NeuroML/granulecell_incremental_writing.py
|
Python
|
gpl-3.0
| 4,177
|
#!/usr/bin/python
import bluetooth
import time
import os
beforePresent = False
timeSleep = 5
countDown = 10
while True:
print "Checking " + time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())
print "TP"
print bluetooth.lookup_name('2C:54:CF:79:C3:5E', timeout=5) or ' not present'
print "PW"
print bluetooth.lookup_name('A0:99:9B:3D:94:11', timeout=5) or ' not present'
print "JA"
print bluetooth.lookup_name('18:AF:61:62:9D:BC', timeout=5) or ' not present'
# if (result != None):
# print "TP present"
# if (beforePresent == False):
# os.system("./codesend 351491")
# os.system("./codesend 351491")
# time.sleep(1)
# os.system("./codesend 349955")
# os.system("./codesend 349955")
# time.sleep(1)
# os.system("./codesend 349635")
# os.system("./codesend 349635")
# time.sleep(1)
# os.system("./codesend 349491")
# os.system("./codesend 349491")
# beforePresent = True
# os.system("echo 0=50% > /dev/servoblaster")
# time.sleep(5)
# os.system("echo 0=0% > /dev/servoblaster")
#backoff to conserve bluetooth power
# if (timeSleep < 60):
# timeSleep += 10
# countDown = 10
# else:
# print "TP out of range"
#if countdown expired
# if (countDown < 0):
#this line is probably pointless
# if (beforePresent == True):
# os.system("./codesend 351500")
# os.system("./codesend 351500")
# time.sleep(1)
# os.system("./codesend 349964")
# os.system("./codesend 349964")
# time.sleep(1)
# os.system("./codesend 349644")
# os.system("./codesend 349644")
# time.sleep(1)
# os.system("./codesend 349500")
# os.system("./codesend 349500")
# beforePresent = False
# else:
# countDown-=1
# timeSleep = 5
time.sleep(timeSleep)
|
Tpeirce/Proximity-Lighting
|
test-tp.py
|
Python
|
gpl-3.0
| 1,765
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition
short_description: register a task definition in ecs
description:
- Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS)
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, boto, botocore, boto3 ]
options:
state:
description:
- State whether the task definition should exist or be deleted
required: true
choices: ['present', 'absent']
arn:
description:
- The arn of the task description to delete
required: false
family:
description:
- A Name that would be given to the task definition
required: false
revision:
description:
- A revision number for the task definition
required: False
force_create:
description:
- Always create new task definition
required: False
version_added: 2.5
containers:
description:
- A list of containers definitions
required: False
network_mode:
description:
- The Docker networking mode to use for the containers in the task.
required: false
default: bridge
choices: [ 'bridge', 'host', 'none' ]
version_added: 2.3
task_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
the permissions that are specified in this role.
required: false
version_added: 2.3
volumes:
description:
- A list of names of volumes to be attached
required: False
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create task definition
ecs_taskdefinition:
containers:
- name: simple-app
cpu: 10
essential: true
image: "httpd:2.4"
memory: 300
mountPoints:
- containerPath: /usr/local/apache2/htdocs
sourceVolume: my-vol
portMappings:
- containerPort: 80
hostPort: 80
- name: busybox
command:
- >
/bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10
entryPoint:
- sh
- "-c"
essential: false
image: busybox
memory: 200
volumesFrom:
- sourceContainer: simple-app
volumes:
- name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
'''
RETURN = '''
taskdefinition:
description: a reflection of the input parameters, plus revision, status and taskDefinitionArn
type: dict
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
class EcsTaskManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Can't authorize connection - " % str(e))
def describe_task(self, task_name):
try:
response = self.ecs.describe_task_definition(taskDefinition=task_name)
return response['taskDefinition']
except botocore.exceptions.ClientError:
return None
def register_task(self, family, task_role_arn, network_mode, container_definitions, volumes):
validated_containers = []
# Ensures the number parameters are int as required by boto
for container in container_definitions:
for param in ('memory', 'cpu', 'memoryReservation'):
if param in container:
container[param] = int(container[param])
if 'portMappings' in container:
for port_mapping in container['portMappings']:
for port in ('hostPort', 'containerPort'):
if port in port_mapping:
port_mapping[port] = int(port_mapping[port])
validated_containers.append(container)
try:
response = self.ecs.register_task_definition(family=family,
taskRoleArn=task_role_arn,
networkMode=network_mode,
containerDefinitions=container_definitions,
volumes=volumes)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
return response['taskDefinition']
def describe_task_definitions(self, family):
data = {
"taskDefinitionArns": [],
"nextToken": None
}
def fetch():
# Boto3 is weird about params passed, so only pass nextToken if we have a value
params = {
'familyPrefix': family
}
if data['nextToken']:
params['nextToken'] = data['nextToken']
result = self.ecs.list_task_definitions(**params)
data['taskDefinitionArns'] += result['taskDefinitionArns']
data['nextToken'] = result.get('nextToken', None)
return data['nextToken'] is not None
# Fetch all the arns, possibly across multiple pages
while fetch():
pass
# Return the full descriptions of the task definitions, sorted ascending by revision
return list(
sorted(
[self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
key=lambda td: td['revision']
)
)
def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
return response['taskDefinition']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
arn=dict(required=False, type='str'),
family=dict(required=False, type='str'),
revision=dict(required=False, type='int'),
force_create=dict(required=False, default=False, type='bool'),
containers=dict(required=False, type='list'),
network_mode=dict(required=False, default='bridge', choices=['bridge', 'host', 'none'], type='str'),
task_role_arn=dict(required=False, default='', type='str'),
volumes=dict(required=False, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
task_to_describe = None
task_mgr = EcsTaskManager(module)
results = dict(changed=False)
if module.params['state'] == 'present':
if 'containers' not in module.params or not module.params['containers']:
module.fail_json(msg="To use task definitions, a list of containers must be specified")
if 'family' not in module.params or not module.params['family']:
module.fail_json(msg="To use task definitions, a family must be specified")
family = module.params['family']
existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
if 'revision' in module.params and module.params['revision']:
# The definition specifies revision. We must guarantee that an active revision of that number will result from this.
revision = int(module.params['revision'])
# A revision has been explicitly specified. Attempt to locate a matching revision
tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
if existing and existing['status'] != "ACTIVE":
# We cannot reactivate an inactive revision
module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
elif not existing:
if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
(revision, existing_definitions_in_family[-1]['revision'] + 1))
else:
existing = None
def _right_has_values_of_left(left, right):
# Make sure the values are equivalent for everything left has
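# e.g. left={'cpu': 10, 'links': []} matches right={'cpu': 10} because
# empty values count as absent, while left={'cpu': 10} does not match
# right={'cpu': 10, 'memory': 300} due to the reverse check below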
for k, v in left.items():
if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
# We don't care about list ordering because ECS can change things
if isinstance(v, list) and k in right:
left_list = v
right_list = right[k] or []
if len(left_list) != len(right_list):
return False
for list_val in left_list:
if list_val not in right_list:
return False
else:
return False
# Make sure right doesn't have anything that left doesn't
for k, v in right.items():
if v and k not in left:
return False
return True
def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
if existing_task_definition['status'] != "ACTIVE":
return None
existing_volumes = existing_task_definition.get('volumes', []) or []
if len(requested_volumes) != len(existing_volumes):
# Nope.
return None
if len(requested_volumes) > 0:
for requested_vol in requested_volumes:
found = False
for actual_vol in existing_volumes:
if _right_has_values_of_left(requested_vol, actual_vol):
found = True
break
if not found:
return None
existing_containers = existing_task_definition.get('containerDefinitions', []) or []
if len(requested_containers) != len(existing_containers):
# Nope.
return None
for requested_container in requested_containers:
found = False
for actual_container in existing_containers:
if _right_has_values_of_left(requested_container, actual_container):
found = True
break
if not found:
return None
return existing_task_definition
# No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
for td in existing_definitions_in_family:
requested_volumes = module.params.get('volumes', []) or []
requested_containers = module.params.get('containers', []) or []
existing = _task_definition_matches(requested_volumes, requested_containers, td)
if existing:
break
if existing and not module.params.get('force_create'):
# Awesome. Have an existing one. Nothing to do.
results['taskdefinition'] = existing
else:
if not module.check_mode:
# Doesn't exist. create it.
volumes = module.params.get('volumes', []) or []
for container in module.params['containers']:
if 'environment' in container:
for environment in container['environment']:
environment['value'] = str(environment['value'])
results['taskdefinition'] = task_mgr.register_task(module.params['family'],
module.params['task_role_arn'],
module.params['network_mode'],
module.params['containers'],
volumes)
results['changed'] = True
elif module.params['state'] == 'absent':
# When de-registering a task definition, we can specify the ARN OR the family and revision.
if module.params['state'] == 'absent':
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
existing = task_mgr.describe_task(task_to_describe)
if not existing:
pass
else:
# It exists, so we should delete it and mark changed. Return info about the task definition deleted
results['taskdefinition'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
task_mgr.deregister_task(task_to_describe)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
|
nazo/ansible
|
lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
|
Python
|
gpl-3.0
| 16,288
|
__author__ = "Mikael Mortensen <mikaem@math.uio.no>"
__date__ = "2018-10-23"
__copyright__ = "Copyright (C) 2018 " + __author__
__license__ = "GNU Lesser GPL version 3 or any later version"
#pylint: disable=unbalanced-tuple-unpacking,unused-variable,function-redefined,unused-argument
from shenfun.spectralbase import inner_product
from shenfun.la import TDMA
from shenfun import TensorProductSpace, Array, TestFunction, TrialFunction, \
CompositeSpace, div, grad, Dx, inner, Function, FunctionSpace
from shenfun.chebyshev.la import Helmholtz, Biharmonic
from .spectralinit import *
from ..shen.Matrices import BiharmonicCoeff, HelmholtzCoeff
from ..shen import LUsolve
def get_context():
"""Set up context for solver"""
# Get points and weights for Chebyshev weighted integrals
assert params.Dquad == params.Bquad
collapse_fourier = params.dealias != '3/2-rule'
ST = FunctionSpace(params.N[2], 'C', bc=(0, 0), quad=params.Dquad)
SB = FunctionSpace(params.N[2], 'C', bc='Biharmonic', quad=params.Bquad)
CT = FunctionSpace(params.N[2], 'C', quad=params.Dquad)
ST0 = FunctionSpace(params.N[2], 'C', bc=(0, 0), quad=params.Dquad) # For 1D problem
K0 = FunctionSpace(params.N[0], 'F', domain=(0, params.L[0]), dtype='D')
K1 = FunctionSpace(params.N[1], 'F', domain=(0, params.L[1]), dtype='d')
kw0 = {'threads':params.threads,
'planner_effort':params.planner_effort["dct"],
'slab': (params.decomposition == 'slab'),
'collapse_fourier': collapse_fourier,
'modify_spaces_inplace': True}
FST = TensorProductSpace(comm, (K0, K1, ST), axes=(2, 0, 1), **kw0) # Dirichlet
FSB = TensorProductSpace(comm, (K0, K1, SB), axes=(2, 0, 1), **kw0) # Biharmonic
FCT = TensorProductSpace(comm, (K0, K1, CT), axes=(2, 0, 1), **kw0) # Regular Chebyshev
VFS = CompositeSpace([FST, FST, FSB])
VFST = CompositeSpace([FST, FST, FST])
VUG = CompositeSpace([FST, FSB])
mask = FST.get_mask_nyquist() if params.mask_nyquist else None
# Padded
kw = {'padding_factor': 1.5 if params.dealias == '3/2-rule' else 1,
'dealias_direct': params.dealias == '2/3-rule'}
if params.dealias == '3/2-rule':
# Requires new bases due to planning and transforms on different size arrays
STp = FunctionSpace(params.N[2], 'C', bc=(0, 0), quad=params.Dquad)
SBp = FunctionSpace(params.N[2], 'C', bc='Biharmonic', quad=params.Bquad)
CTp = FunctionSpace(params.N[2], 'C', quad=params.Dquad)
else:
STp, SBp, CTp = ST, SB, CT
K0p = FunctionSpace(params.N[0], 'F', dtype='D', domain=(0, params.L[0]), **kw)
K1p = FunctionSpace(params.N[1], 'F', dtype='d', domain=(0, params.L[1]), **kw)
FSTp = TensorProductSpace(comm, (K0p, K1p, STp), axes=(2, 0, 1), **kw0)
FSBp = TensorProductSpace(comm, (K0p, K1p, SBp), axes=(2, 0, 1), **kw0)
FCTp = TensorProductSpace(comm, (K0p, K1p, CTp), axes=(2, 0, 1), **kw0)
VFSp = CompositeSpace([FSTp, FSTp, FSBp])
float, complex, mpitype = datatypes("double")
# Mesh variables
X = FST.local_mesh(True)
x0, x1, x2 = FST.mesh()
K = FST.local_wavenumbers(scaled=True)
# Solution variables
U = Array(VFS)
U0 = Array(VFS)
U_hat = Function(VFS)
U_hat0 = Function(VFS)
g = Function(FST)
# primary variable
u = (U_hat, g)
H_hat = Function(VFST)
H_hat0 = Function(VFST)
H_hat1 = Function(VFST)
dU = Function(VUG)
hv = Function(FST)
hg = Function(FST)
Source = Array(VFS)
Sk = Function(VFS)
K2 = K[0]*K[0]+K[1]*K[1]
K4 = K2**2
# Set Nyquist frequency to zero on K that is used for odd derivatives in nonlinear terms
Kx = FST.local_wavenumbers(scaled=True, eliminate_highest_freq=True)
K_over_K2 = np.zeros((2,)+g.shape)
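# guard the zero wavenumber: where K2 == 0 the numerator K[i] is zero too,
# so dividing by 1 instead of 0 simply leaves K_over_K2 = 0 there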
for i in range(2):
K_over_K2[i] = K[i] / np.where(K2 == 0, 1, K2)
for i in range(3):
K[i] = K[i].astype(float)
Kx[i] = Kx[i].astype(float)
Kx2 = Kx[1]*Kx[1]+Kx[2]*Kx[2]
work = work_arrays()
u_dealias = Array(VFSp)
u0_hat = np.zeros((2, params.N[2]), dtype=complex)
h0_hat = np.zeros((2, params.N[2]), dtype=complex)
w = np.zeros((params.N[2], ), dtype=complex)
w1 = np.zeros((params.N[2], ), dtype=complex)
nu, dt, N = params.nu, params.dt, params.N
# Collect all matrices
mat = config.AttributeDict(
dict(CDD=inner_product((ST, 0), (ST, 1)),
CTD=inner_product((CT, 0), (ST, 1)),
BTT=inner_product((CT, 0), (CT, 0)),
AB=HelmholtzCoeff(N[2], 1.0, -(K2 - 2.0/nu/dt), 2, ST.quad),
AC=BiharmonicCoeff(N[2], nu*dt/2., (1. - nu*dt*K2), -(K2 - nu*dt/2.*K4), 2, SB.quad),
# Matrices for biharmonic equation
CBD=inner_product((SB, 0), (ST, 1)),
ABB=inner_product((SB, 0), (SB, 2)),
BBB=inner_product((SB, 0), (SB, 0)),
SBB=inner_product((SB, 0), (SB, 4)),
# Matrices for Helmholtz equation
ADD=inner_product((ST, 0), (ST, 2)),
BDD=inner_product((ST, 0), (ST, 0)),
BBD=inner_product((SB, 0), (ST, 0)),
CDB=inner_product((ST, 0), (SB, 1)),
ADD0=inner_product((ST0, 0), (ST0, 2)),
BDD0=inner_product((ST0, 0), (ST0, 0))))
la = config.AttributeDict(
dict(HelmholtzSolverG=Helmholtz(mat.ADD, mat.BDD, -np.ones((1, 1, 1)),
(K2+2.0/nu/dt)),
BiharmonicSolverU=Biharmonic(mat.SBB, mat.ABB, mat.BBB, -nu*dt/2.*np.ones((1, 1, 1)),
(1.+nu*dt*K2),
(-(K2 + nu*dt/2.*K4))),
HelmholtzSolverU0=Helmholtz(mat.ADD0, mat.BDD0, np.array([-1.]), np.array([2./nu/dt])),
TDMASolverD=TDMA(inner_product((ST, 0), (ST, 0)))))
hdf5file = KMMFile(config.params.solver,
checkpoint={'space': VFS,
'data': {'0': {'U': [U_hat]},
'1': {'U': [U_hat0]}}},
results={'space': VFS,
'data': {'U': [U]}})
return config.AttributeDict(locals())
class KMMFile(HDF5File):
def update_components(self, U_hat, U, **context):
"""Transform to real data when storing the solution"""
U = U_hat.backward(U)
assert params.precision == "double"
def end_of_tstep(context):
"""Function called at end of time step.
If returning True, the while-loop in time breaks free. Used by adaptive
solvers to modify the time stepsize. Used here to rotate solutions.
"""
context.U_hat0[:] = context.U_hat
context.H_hat1[:] = context.H_hat
return False
def get_velocity(U, U_hat, VFS, **context):
"""Compute velocity from context"""
U = VFS.backward(U_hat, U)
return U
def set_velocity(U_hat, U, VFS, **context):
"""Set transformed velocity from context"""
U_hat = VFS.forward(U, U_hat)
return U_hat
def get_curl(curl, U_hat, g, work, FCTp, FSTp, FSBp, Kx, **context):
"""Compute curl from context"""
curl = compute_curl(curl, U_hat, g, Kx, FCTp, FSTp, FSBp, work)
return curl
def get_convection(H_hat, U_hat, g, Kx, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias, **context):
"""Compute convection from context"""
conv_ = getConvection(params.convection)
H_hat = conv_(H_hat, U_hat, g, Kx, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias)
return H_hat
def get_pressure(context, solver):
FCT = context.FCT
FST = context.FST
U = solver.get_velocity(**context)
U0 = context.VFS.backward(context.U_hat0, context.U0)
dt = solver.params.dt
H_hat = solver.get_convection(**context)
Hx = Array(FST)
Hx = FST.backward(H_hat[0], Hx)
v = TestFunction(FCT)
p = TrialFunction(FCT)
U = U.as_function()
U0 = U0.as_function()
rhs_hat = inner((0.5*context.nu)*div(grad(U[0]+U0[0])), v)
Hx -= 1./dt*(U[0]-U0[0])
rhs_hat += inner(Hx, v)
CT = inner(Dx(p, 0), v)
# Should implement fast solver. Just a backwards substitution
A = CT.diags().toarray()*CT.scale[0]
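# the spectral pressure is only determined up to a constant; overwriting
# the last row below pins the zeroth coefficient so A becomes invertible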
A[-1, 0] = 1
a_i = np.linalg.inv(A)
p_hat = Function(context.FCT)
for j in range(p_hat.shape[1]):
for k in range(p_hat.shape[2]):
p_hat[:, j, k] = np.dot(a_i, rhs_hat[:, j, k])
p = Array(FCT)
p = FCT.backward(p_hat, p)
uu = np.sum((0.5*(U+U0))**2, 0)
uu *= 0.5
return p-uu+3./16.
def get_divergence(U, U_hat, FST, K, Kx, work, la, mat, **context):
Uc_hat = work[(U_hat[0], 0, True)]
Uc = work[(U, 2, True)]
Uc_hat = mat.CDB.matvec(U_hat[2], Uc_hat, axis=2)
Uc_hat = la.TDMASolverD(Uc_hat, axis=2)
dwdz = Uc[2] = FST.backward(Uc_hat, Uc[2])
dudx_h = 1j*K[0]*U_hat[0]
dudx = Uc[0] = FST.backward(dudx_h, Uc[0])
dvdy_h = 1j*K[1]*U_hat[1]
dvdy = Uc[1] = FST.backward(dvdy_h, Uc[1])
return dudx+dvdy+dwdz
#@profile
def Cross(c, a, b, FSTp, work):
Uc = work[(a, 2, False)]
Uc = cross1(Uc, a, b)
c[0] = FSTp.forward(Uc[0], c[0])
c[1] = FSTp.forward(Uc[1], c[1])
c[2] = FSTp.forward(Uc[2], c[2])
return c
def compute_curl(c, u_hat, g, K, FCTp, FSTp, FSBp, work):
F_tmp = work[(u_hat, 0, False)]
F_tmp2 = work[(u_hat, 2, False)]
Uc = work[(c, 2, False)]
# Mult_CTD_3D is projection to T of d(u_hat)/dz (for components 0 and 1 of u_hat)
# Corresponds to CTD.matvec(u_hat[0])/BTT.dd, CTD.matvec(u_hat[1])/BTT.dd
#LUsolve.Mult_CTD_3D_n(params.N[2], u_hat[0], u_hat[1], F_tmp[0], F_tmp[1], 2)
LUsolve.Mult_CTD_3D_ptr(params.N[2], u_hat[0], u_hat[1], F_tmp[0], F_tmp[1], 2)
dudz = Uc[0] = FCTp.backward(F_tmp[0], Uc[0])
dvdz = Uc[1] = FCTp.backward(F_tmp[1], Uc[1])
c[2] = FSTp.backward(g, c[2])
dwdy = F_tmp2[0] = 1j*K[1]*u_hat[2]
dwdx = F_tmp2[1] = 1j*K[0]*u_hat[2]
c[0] = FSBp.backward(F_tmp2[0], c[0])
c[0] -= dvdz
c[1] = FSBp.backward(-F_tmp2[1], c[1])
c[1] += dudz
return c
def compute_derivatives(U, U_hat, FST, FCT, FSB, K, la, mat, work, **context):
duidxj = np.zeros((3, 3)+U.shape[1:])
F_tmp = work[(U_hat, 0, True)]
# dudx = 0 from continuity equation. Use Shen Dirichlet basis
# Use regular Chebyshev basis for dvdx and dwdx
F_tmp[0] = mat.CDB.matvec(U_hat[0], F_tmp[0])
F_tmp[0] = la.TDMASolverD(F_tmp[0])
duidxj[0, 0] = FST.backward(F_tmp[0], duidxj[0, 0])
LUsolve.Mult_CTD_3D_n(params.N[0], U_hat[1], U_hat[2], F_tmp[1], F_tmp[2])
duidxj[1, 0] = dvdx = FCT.backward(F_tmp[1], duidxj[1, 0]) # proj to Cheb
duidxj[2, 0] = dwdx = FCT.backward(F_tmp[2], duidxj[2, 0]) # proj to Cheb
duidxj[0, 1] = dudy = FSB.backward(1j*K[1]*U_hat[0], duidxj[0, 1]) # ShenB
duidxj[0, 2] = dudz = FSB.backward(1j*K[2]*U_hat[0], duidxj[0, 2])
duidxj[1, 1] = dvdy = FST.backward(1j*K[1]*U_hat[1], duidxj[1, 1])
duidxj[1, 2] = dvdz = FST.backward(1j*K[2]*U_hat[1], duidxj[1, 2])
duidxj[2, 1] = dwdy = FST.backward(1j*K[1]*U_hat[2], duidxj[2, 1])
duidxj[2, 2] = dwdz = FST.backward(1j*K[2]*U_hat[2], duidxj[2, 2])
return duidxj
def standardConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp, FSBp, FCTp, work,
mat, la):
rhs[:] = 0
U = u_dealias
Uc = work[(U, 1, True)]
Uc2 = work[(U, 2, True)]
F_tmp = work[(rhs, 0, True)]
# dudx = 0 from continuity equation. Use Shen Dirichlet basis
# Use regular Chebyshev basis for dvdx and dwdx
F_tmp[0] = mat.CDB.matvec(u_hat[0], F_tmp[0])
F_tmp[0] = la.TDMASolverD(F_tmp[0])
dudx = Uc[0] = FSTp.backward(F_tmp[0], Uc[0])
LUsolve.Mult_CTD_3D_n(params.N[0], u_hat[1], u_hat[2], F_tmp[1], F_tmp[2])
dvdx = Uc[1] = FCTp.backward(F_tmp[1], Uc[1])
dwdx = Uc[2] = FCTp.backward(F_tmp[2], Uc[2])
dudy = Uc2[0] = FSBp.backward(1j*K[1]*u_hat[0], Uc2[0])
dudz = Uc2[1] = FSBp.backward(1j*K[2]*u_hat[0], Uc2[1])
rhs[0] = FSTp.forward(U[0]*dudx + U[1]*dudy + U[2]*dudz, rhs[0])
Uc2[:] = 0
dvdy = Uc2[0] = FSTp.backward(1j*K[1]*u_hat[1], Uc2[0])
dvdz = Uc2[1] = FSTp.backward(1j*K[2]*u_hat[1], Uc2[1])
rhs[1] = FSTp.forward(U[0]*dvdx + U[1]*dvdy + U[2]*dvdz, rhs[1])
Uc2[:] = 0
dwdy = Uc2[0] = FSTp.backward(1j*K[1]*u_hat[2], Uc2[0])
dwdz = Uc2[1] = FSTp.backward(1j*K[2]*u_hat[2], Uc2[1])
rhs[2] = FSTp.forward(U[0]*dwdx + U[1]*dwdy + U[2]*dwdz, rhs[2])
return rhs
def divergenceConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp, FSBp, FCTp, work,
mat, la, add=False):
"""c_i = div(u_i u_j)"""
if not add:
rhs.fill(0)
F_tmp = work[(rhs, 0, True)]
F_tmp2 = work[(rhs, 1, True)]
U = u_dealias
F_tmp[0] = FSTp.forward(U[0]*U[0], F_tmp[0])
F_tmp[1] = FSTp.forward(U[0]*U[1], F_tmp[1])
F_tmp[2] = FSTp.forward(U[0]*U[2], F_tmp[2])
F_tmp2[0] = mat.CDD.matvec(F_tmp[0], F_tmp2[0])
F_tmp2[1] = mat.CDD.matvec(F_tmp[1], F_tmp2[1])
F_tmp2[2] = mat.CDD.matvec(F_tmp[2], F_tmp2[2])
F_tmp2[0] = la.TDMASolverD(F_tmp2[0])
F_tmp2[1] = la.TDMASolverD(F_tmp2[1])
F_tmp2[2] = la.TDMASolverD(F_tmp2[2])
rhs[0] += F_tmp2[0]
rhs[1] += F_tmp2[1]
rhs[2] += F_tmp2[2]
F_tmp2[0] = FSTp.forward(U[0]*U[1], F_tmp2[0])
F_tmp2[1] = FSTp.forward(U[0]*U[2], F_tmp2[1])
rhs[0] += 1j*K[1]*F_tmp2[0] # duvdy
rhs[0] += 1j*K[2]*F_tmp2[1] # duwdz
F_tmp[0] = FSTp.forward(U[1]*U[1], F_tmp[0])
F_tmp[1] = FSTp.forward(U[1]*U[2], F_tmp[1])
F_tmp[2] = FSTp.forward(U[2]*U[2], F_tmp[2])
rhs[1] += 1j*K[1]*F_tmp[0] # dvvdy
rhs[1] += 1j*K[2]*F_tmp[1] # dvwdz
rhs[2] += 1j*K[1]*F_tmp[1] # dvwdy
rhs[2] += 1j*K[2]*F_tmp[2] # dwwdz
return rhs
def getConvection(convection):
if convection == "Standard":
def Conv(rhs, u_hat, g_hat, K, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias):
u_dealias = VFSp.backward(u_hat, u_dealias)
rhs = standardConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp,
FSBp, FCTp, work, mat, la)
rhs[:] *= -1
return rhs
elif convection == "Divergence":
def Conv(rhs, u_hat, g_hat, K, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias):
u_dealias = VFSp.backward(u_hat, u_dealias)
rhs = divergenceConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp,
FSBp, FCTp, work, mat, la, False)
rhs[:] *= -1
return rhs
elif convection == "Skew":
def Conv(rhs, u_hat, g_hat, K, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias):
u_dealias = VFSp.backward(u_hat, u_dealias)
rhs = standardConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp,
FSBp, FCTp, work, mat, la)
rhs = divergenceConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp,
FSBp, FCTp, work, mat, la, True)
rhs *= -0.5
return rhs
elif convection == "Vortex":
def Conv(rhs, u_hat, g_hat, K, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias):
curl_dealias = work[(u_dealias, 1, False)]
u_dealias = VFSp.backward(u_hat, u_dealias)
curl_dealias = compute_curl(curl_dealias, u_hat, g_hat, K, FCTp, FSTp, FSBp, work)
rhs = Cross(rhs, u_dealias, curl_dealias, FSTp, work)
return rhs
Conv.convection = convection
return Conv
@optimizer
def assembleAB(H_hat0, H_hat, H_hat1):
H_hat0[:] = 1.5*H_hat - 0.5*H_hat1
return H_hat0
@optimizer
def add_linear(rhs, u, g, work, AB, AC, SBB, ABB, BBB, nu, dt, K2, K4):
diff_u = work[(g, 0, False)]
diff_g = work[(g, 1, False)]
u0 = work[(g, 2, False)]
# Compute diffusion for g-equation
diff_g = AB.matvec(g, diff_g)
# Compute diffusion++ for u-equation
diff_u = AC.matvec(u, diff_u)
#diff_u[:] = nu*dt/2.*SBB.matvec(u, u0)
#diff_u += (1. - nu*dt*K2)*ABB.matvec(u, u0)
#diff_u -= (K2 - nu*dt/2.*K4)*BBB.matvec(u, u0)
rhs[0] += diff_u
rhs[1] += diff_g
return rhs
#@profile
def ComputeRHS(rhs, u_hat, g_hat, solver,
H_hat, H_hat1, H_hat0, VFSp, FSTp, FSBp, FCTp, work, Kx, K2, Kx2,
K4, hv, hg, mat, la, u_dealias, mask, **context):
"""Compute right hand side of Navier Stokes
args:
rhs The right hand side to be returned
u_hat The FST of the velocity at current time
g_hat The FST of the curl in wall normal direction
solver The current solver module
Remaining args are extracted from context
"""
# Nonlinear convection term at current u_hat
H_hat = solver.conv(H_hat, u_hat, g_hat, Kx, VFSp, FSTp, FSBp, FCTp, work, mat, la, u_dealias)
# Assemble convection with Adams-Bashforth at time = n+1/2
H_hat0 = solver.assembleAB(H_hat0, H_hat, H_hat1)
if mask is not None:
H_hat0.mask_nyquist(mask)
# Assemble hv, hg and remaining rhs
w0 = work[(hv, 0, False)]
w1 = work[(hv, 1, False)]
hv[:] = -1j*Kx[0]*mat.CBD.matvec(H_hat0[0], w0, axis=2)
hv -= 1j*Kx[1]*mat.CBD.matvec(H_hat0[1], w0, axis=2)
hv -= K2*mat.BBD.matvec(H_hat0[2], w0, axis=2)
hg[:] = 1j*Kx[0]*mat.BDD.matvec(H_hat0[1], w0, axis=2) - 1j*Kx[1]*mat.BDD.matvec(H_hat0[0], w1, axis=2)
rhs[0] = hv*params.dt
rhs[1] = hg*2./params.nu
rhs = solver.add_linear(rhs, u_hat[2], g_hat, work, mat.AB, mat.AC, mat.SBB,
mat.ABB, mat.BBB, params.nu, params.dt, K2, K4)
return rhs
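# Derivation note for compute_vw below: with f_hat = -d(u_hat[2])/dz, the
# continuity equation gives 1j*Kx[0]*u_hat[0] + 1j*Kx[1]*u_hat[1] = f_hat,
# while the wall-normal vorticity is g_hat = 1j*Kx[0]*u_hat[1] - 1j*Kx[1]*u_hat[0].
# Inverting this 2x2 system (determinant -K2) for u_hat[0] and u_hat[1] yields
# the expressions below, with K_over_K2 = Kx/K2.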
def compute_vw(u_hat, f_hat, g_hat, K_over_K2):
u_hat[0] = -1j*(K_over_K2[0]*f_hat - K_over_K2[1]*g_hat)
u_hat[1] = -1j*(K_over_K2[1]*f_hat + K_over_K2[0]*g_hat)
return u_hat
#@profile
def solve_linear(u_hat, g_hat, rhs,
work, la, mat, K_over_K2, H_hat0, U_hat0, Sk, u0_hat, h0_hat,
w, w1, **context):
"""Solve final linear algebra systems"""
f_hat = work[(u_hat[2], 0, True)]
w0 = work[(u_hat[2], 1, False)]
u_hat[2] = la.BiharmonicSolverU(u_hat[2], rhs[0])
g_hat = la.HelmholtzSolverG(g_hat, rhs[1])
# Compute v_hat and w_hat from u_hat and g_hat
f_hat -= mat.CDB.matvec(u_hat[2], w0, axis=2)
f_hat = la.TDMASolverD(f_hat, axis=2)
u_hat = compute_vw(u_hat, f_hat, g_hat, K_over_K2)
# Remains to fix wavenumber 0
if rank == 0:
h0_hat[0] = H_hat0[0, 0, 0]
h0_hat[1] = H_hat0[1, 0, 0]
u0_hat[0] = U_hat0[0, 0, 0]
u0_hat[1] = U_hat0[1, 0, 0]
w = mat.BDD0.matvec(2./params.nu*h0_hat[0], w)
w -= 2./params.nu * Sk[0, 0, 0]
w1 = mat.ADD0.matvec(u0_hat[0], w1)
w += w1
w += 2./params.nu/params.dt * mat.BDD0.matvec(u0_hat[0], w1)
u0_hat[0] = la.HelmholtzSolverU0(u0_hat[0], w)
w = mat.BDD0.matvec(2./params.nu*h0_hat[1], w)
w += mat.ADD0.matvec(u0_hat[1], w1)
w += mat.BDD0.matvec(2./params.nu/params.dt*u0_hat[1], w1)
u0_hat[1] = la.HelmholtzSolverU0(u0_hat[1], w)
u_hat[0, 0, 0] = u0_hat[0]
u_hat[1, 0, 0] = u0_hat[1]
        u_hat[2, 0, 0] = 0  # This is required for continuity
return u_hat, g_hat
def integrate(u_hat, g_hat, rhs, dt, solver, context):
"""Regular implicit solver for KMM channel solver"""
rhs[:] = 0
rhs = solver.ComputeRHS(rhs, u_hat, g_hat, solver, **context)
u_hat, g_hat = solver.solve_linear(u_hat, g_hat, rhs, **context)
return (u_hat, g_hat), dt, dt
def getintegrator(rhs, u0, solver, context):
u_hat, g_hat = u0
def func():
return solver.integrate(u_hat, g_hat, rhs, params.dt, solver, context)
return func
|
mikaem/spectralDNS
|
spectralDNS/solvers/KMMr.py
|
Python
|
gpl-3.0
| 19,773
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2014 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""
from __future__ import unicode_literals
import pytest
from django.core.management import call_command
from upaas_admin.common.tests import MongoEngineTestCase
from upaas_admin.apps.scheduler.models import (BackendRunPlanSettings,
ApplicationRunPlan)
class RunPlanMigrationTest(MongoEngineTestCase):
@pytest.mark.usefixtures("create_pkg", "create_backend", "create_router")
def test_run_plan_migration(self):
backend_settings = BackendRunPlanSettings(backend=self.backend,
package=self.pkg,
socket=8080, stats=9090,
workers_min=1, workers_max=4)
run_plan = ApplicationRunPlan(application=self.app,
backends=[backend_settings],
workers_min=1, workers_max=4,
memory_per_worker=128, max_log_size=1)
run_plan.save()
self.app.reload()
self.assertEqual(self.app.run_plan, None)
self.assertEqual(call_command('migrate_db'), None)
self.app.reload()
self.assertEqual(self.app.run_plan, run_plan)
run_plan.delete()
|
prymitive/upaas-admin
|
tests/test_migrate_run_plans.py
|
Python
|
gpl-3.0
| 1,423
|
#! /usr/bin/python
import requests
import bs4
import re
import datetime
import pg
conn = pg.connect(
host='localhost',
dbname='bidFTA',
user='postgres'
)
cons = pg.DB(host='localhost',
dbname='bidFTA',
user='postgres')
known = [x[0] for x in conn.query('SELECT num FROM auctions').getresult()]
def cull():
print conn.query('DELETE FROM auctions WHERE datetime < now()')
def getListings(listings, site, auction):
items = []
for listing in listings.select('.DataRow'):
try:
id = int(listing.attrs.get('id'))
except ValueError:
#print listing.attrs.get('id')
continue
if id == 0:
continue
tds = listing.select('td')
info = re.split(r'<br\s*/>', str(tds[2]))
INFO = {'auction': auction, 'id': id}
for x in info:
try:
key, val = re.sub(r'<.{1,3}>', '', x).split(': ')
if key.lower() in ['brand', 'item description', 'additional information']:
if key.lower() == 'item description':
INFO['description'] = val.strip()
elif key.lower() == 'additional information':
INFO['info'] = val.strip()
else:
INFO[key.lower()] = val.strip()
except ValueError:
continue
INFO['image'] = tds[1].select('img')[0].attrs.get('src')
INFO['link'] = site + tds[1].select('a')[0].attrs.get('href')
items.append(INFO)
return items
def main():
response = requests.get('http://bidfta.com')
soup = bs4.BeautifulSoup(response.text.replace(u'\u2019',''))
aucs = {}
auctions = (a for a in soup.select('div.content.active div.currentAuctionsListings div.auction'))
for auction in auctions:
title = auction.select('a div.auctionTitle h4')[0].get_text().split()
#print 'Auction#:', title[0]
if title[0] not in aucs:
aucs[title[0]] = {'listings': []}
#print 'Title:', ' '.join(title[2:])
aucs[title[0]]['title'] = ' '.join(title[2:])
#print 'Location:', auction.select('a .auctionLocation')[0].get_text()
aucs[title[0]]['location'] = auction.select('a .auctionLocation')[0].get_text()
#print 'Datetime:', auction.select('time')[0].attrs.get('datetime')
# strip timezone
aucs[title[0]]['datetime'] = datetime.datetime.strptime(auction.select('time')[0].attrs.get('datetime')[:-5], "%Y-%m-%dT%H:%M:%S")
# if auction already over, ignore
if (aucs[title[0]]['datetime'] < datetime.datetime.now()
or int(title[0]) in known):
continue
aucIns = {'num': title[0], 'title': aucs[title[0]]['title'].replace(u'\u2019',''), 'location': aucs[title[0]]['location'],
'datetime': aucs[title[0]]['datetime']
}
cons.insert('auctions', aucIns)
aucID = conn.query('SELECT id FROM auctions WHERE num=%s' % (aucIns['num'])).getresult()[0][0]
link = auction.select('a')[0].attrs.get('href')
site = '/'.join(link.split('/')[0:3])
details = requests.get(link).text
soup = bs4.BeautifulSoup(details)
#print 'Removal:', details.split('REMOVAL: ')[1].split('<p>')[0]
aucSite = site + soup.select('a')[6].attrs.get('href')
listings = bs4.BeautifulSoup(requests.get(aucSite).text)
# get next page
aucs[title[0]]['listings'].extend(getListings(listings, site, aucID))
try:
form = listings.select('form[name=viewform]')[0]
except IndexError:
continue
data = {
'auction': form.select('[name=auction]')[0].attrs.get('value'),
'contents': form.select('[name=contents]')[0].attrs.get('value'),
'pages':form.select('[name=pages]')[0].attrs.get('value'),
'searchtitle':form.select('[name=searchtitle]')[0].attrs.get('value'),
'searchcount':form.select('[name=searchcount]')[0].attrs.get('value')
}
for i in range(int(data['pages'])):
page = form.select('[name=p%d]' % (i+1))[0]
data[page.attrs.get('name')] = page.attrs.get('value')
for i in range(int(data['pages'])-1):
data['page'] = 'p%s' % (i+2)
data['npage'] = 'p%s' % (i+1)
nextP = requests.post(aucSite, data=data).text
listings = bs4.BeautifulSoup(nextP)
aucs[title[0]]['listings'].extend(getListings(listings, site, aucID))
print 'Auction: %s, %d listings' % (title[0], len(aucs[title[0]]['listings']))
for items in aucs[title[0]]['listings']:
try:
cons.insert('items', items)
            except Exception:
continue
if __name__ == '__main__':
main()
#html = '''<input type="submit" name="page"></input><input type="hidden" name="page"></input>'''
#soup = bs4.BeautifulSoup(html)
#print soup.select('input[type=submit]')
form = '''<form action="/cgi-bin/mnlist.cgi" method="post" name="viewform">
<br/><table align="center" border="0" id="SelectPage" width="100%"><tr><td align="center">
<input name="auction" type="hidden" value="twomc184"/>
<input name="contents" type="hidden" value="0/A/B/C/D/E/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/41/42/43/44/"/>
<input name="pages" type="hidden" value="3"/>
<input name="searchtitle" type="hidden" value="Category:-ALL"/>
<input name="searchcount" type="hidden" value="106"/>
<input name="page" type="hidden" value="p1"/>
<p align="center">Select page to view:
<input name="p1" type="hidden" value="0/A/B/C/D/E/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/41/42/43/44/"/> <u>p1</u>
<input name="p2" type="hidden" value="45/46/47/48/49/50/51/52/53/54/55/56/57/58/59/60/61/62/63/64/65/66/67/68/69/70/71/72/73/74/75/76/77/78/79/80/81/82/83/84/85/86/87/88/89/90/91/92/93/94/"/> <input name="page" type="submit" value="p2"/>
<input name="p3" type="hidden" value="95/96/97/98/99/100/"/> <input name="page" type="submit" value="p3"/>
<input name="npage" type="hidden" value="p2"/>
<input name="nwpage" type="hidden" value=""/> </p></td></tr>
</table>
</form>'''
#form = bs4.BeautifulSoup(form)
#for i in range(3):
# page = form.select('[name=p%d]' % (i+1))[0]
# print
# print 'VALUE:', page.attrs.get('value')
# print
'''<tr class="DataRow" id="1" valign="top"><td><a href="/cgi-bin/mnlist.cgi?twomc198/1">1</a></td>
<td align="center"><a href="/cgi-bin/mnlist.cgi?twomc198/1"><img alt="1t.jpg" border="0" src="https://fast-track-auctions.s3.amazonaws.com/uploads/auctions/198/1t.jpg"/></a></td><td><b>Brand</b>: DIVERSEY<br/><b>Item Description</b>: CASE OF 6 SIGNATURE ULTRA HIGH SPEED FLOOR FINISH, EACH INDIVIDUAL PACKAGE CONTAINS 2.5L!<br/><b>Retail</b>: $119.99 <br/><b>Location</b>: MW-BY FRONT OFFICE<br/><b>Additional Information</b>: 6X YOUR BID, New- Item is new in or out of the box and will have no damage, missing parts or pieces.<br/><b>Contact</b>: Please use our contact submission via bidfta.com to submit any questions regarding this auction.<br/><b>Front Page</b>: <a href="http://www.bidfta.com" target="_blank">Click here to go back to Fast Track Auction Home Page</a> <br/></td>
<td align="right"><a href="/cgi-bin/mnhistory.cgi?twomc198/1">6</a></td>
<td align="right">78232</td>
<td align="right">4.20
<br/>x 6 = 25.20</td>
<td align="right">??</td>
<td align="center" colspan="2">ended</td></tr>'''
'''
<tr class="DataRow" id="1" valign="top">
<td>
<a href="/cgi-bin/mnlist.cgi?twomc198/1">1</a>
</td>
<td align="center">
<a href="/cgi-bin/mnlist.cgi?twomc198/1">
<img alt="1t.jpg" border="0" src="https://fast-track-auctions.s3.amazonaws.com/uploads/auctions/198/1t.jpg"/>
</a>
</td>
<td>
<b>Brand</b>: DIVERSEY<br/>
<b>Item Description</b>: CASE OF 6 SIGNATURE ULTRA HIGH SPEED FLOOR FINISH, EACH INDIVIDUAL PACKAGE CONTAINS 2.5L!<br/>
<b>Retail</b>: $119.99 <br/>
<b>Location</b>: MW-BY FRONT OFFICE<br/>
<b>Additional Information</b>: 6X YOUR BID, New- Item is new in or out of the box and will have no damage, missing parts or pieces.<br/>
<b>Contact</b>: Please use our contact submission via bidfta.com to submit any questions regarding this auction.<br/>
<b>Front Page</b>: <a href="http://www.bidfta.com" target="_blank">Click here to go back to Fast Track Auction Home Page</a> <br/>
</td>
<td align="right"><a href="/cgi-bin/mnhistory.cgi?twomc198/1">6</a></td>
<td align="right">78232</td>
<td align="right">4.20
<br/>x 6 = 25.20</td>
<td align="right">??</td>
<td align="center" colspan="2">ended</td>
</tr>
'''
|
bnjmnjhnsn/scraper
|
scraper.py
|
Python
|
gpl-3.0
| 8,978
|
class MifareClassic1k():
pass
|
mchro/RejsekortReader
|
rfid/formats/mifare/MifareClassic1k.py
|
Python
|
gpl-3.0
| 34
|
from background_task import background
from .models import Notification
import requests
import json
from .models import Stream, Notification
import redis
import ast
import logging
logger = logging.getLogger('StreamBasedNotifs')
hdlr = logging.FileHandler('notifications.log')
formatter = logging.Formatter('%(asctime)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
@background(schedule=0)
def sendNotifications(data):
'''send delayed notifications to webhook url'''
notif_features = Notification.objects.get(event_name=data['name'])
webhook_url = notif_features.url
slack_data = {}
slack_data['info'] = data['info']
target_data = []
    if not notif_features.target:  # If target has been set as User
target_data.append(data['user_id'])
slack_data['target'] = target_data
else:
slack_data['target'] = data['associated_user_ids']
slack_data['event_name'] = data['name']
slack_data['name'] = notif_features.name
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'}
)
if response.status_code/100 != 2:
logger.error(
'%s \n %s'
% (str(response.status_code), data)
)
def send_notifications(data):
'''send notifications without delay to webhook url'''
notif_features = Notification.objects.get(event_name=data['name'])
webhook_url = notif_features.url
slack_data = {}
slack_data['info'] = data['info']
target_data = []
if not notif_features.target: # If target has been set as User
target_data.append(data['user_id'])
slack_data['target'] = target_data
else:
slack_data['target'] = data['associated_user_ids']
slack_data['event_name'] = data['name']
slack_data['name'] = notif_features.name
response = requests.post(
webhook_url, data=json.dumps(slack_data),
headers={'Content-Type': 'application/json'}
)
if response.status_code / 100 != 2:
logger.error(
'%s \n %s'
% (str(response.status_code), data)
)
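# Illustrative webhook payload built by the two functions above (field values
# here are examples only):
# {
#     "info": {...},                # event payload passed through unchanged
#     "target": [42],               # user id(s) the notification targets
#     "event_name": "order_created",
#     "name": "Order created"
# }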
@background(schedule=-1)
def listen_stream():
redis_con = redis.Redis('demo.scorebeyond.com', 8007)
subs = redis_con.pubsub()
subs.subscribe('test')
for message in subs.listen():
if message['type'] == "message":
data1 = ast.literal_eval(message['data'])
if Notification.objects.filter(event_name=data1['name']):
                sendNotifications(data1, schedule=Notification.objects.get(event_name=data1['name']).delay)  # pass the configured delay as the schedule
if not Stream.objects.filter(name=data1['name']):
type_list = []
if not data1['info']:
Stream.objects.create(name=data1['name'], info="")
else:
for k, v in data1['info'].iteritems():
type_list.append(k+":"+type(v).__name__)
Stream.objects.create(name=data1['name'], info=','.join(type_list))
else:
print message
|
bkaganyildiz/StreamBasedNotification
|
StreamBasedNotifs/capture/task.py
|
Python
|
gpl-3.0
| 3,089
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 27 20:17:27 2016
@author: alanschoen
"""
import sqlite3
conn = sqlite3.connect('../output/shelters.db')
print "Tables:"
for row in conn.execute("SELECT name FROM sqlite_master WHERE type='table'"):
print row
print "Shelters:"
for row in conn.execute('SELECT * FROM shelters ORDER BY NoboMile LIMIT 5'):
print row
conn.close()
|
alanjschoen/hikkit
|
src/validate_db.py
|
Python
|
gpl-3.0
| 391
|
import datetime
import os.path
from sickchill import logger
from sickchill.helper.common import episode_num
from sickchill.oldbeard import common, db, helpers
MIN_DB_VERSION = 44
MAX_DB_VERSION = 44
class MainSanityCheck(db.DBSanityCheck):
def check(self):
self.fix_missing_table_indexes()
self.fix_duplicate_shows()
self.fix_duplicate_episodes()
self.fix_orphan_episodes()
self.fix_unaired_episodes()
self.fix_episode_statuses()
self.fix_invalid_airdates()
self.fix_show_nfo_lang()
self.convert_archived_to_compound()
def convert_archived_to_compound(self):
logger.debug(_('Checking for archived episodes not qualified'))
sql_results = self.connection.select(
"SELECT episode_id, showid, status, location, season, episode FROM tv_episodes WHERE status = ?", [common.ARCHIVED]
)
if sql_results:
logger.warning(_("Found {count} shows with bare archived status, attempting automatic conversion...".format(count=len(sql_results))))
for archivedEp in sql_results:
fixed_status = common.Quality.compositeStatus(common.ARCHIVED, common.Quality.UNKNOWN)
existing = archivedEp['location'] and os.path.exists(archivedEp['location'])
if existing:
quality = common.Quality.nameQuality(archivedEp['location'])
fixed_status = common.Quality.compositeStatus(common.ARCHIVED, quality)
old_status = common.statusStrings[common.ARCHIVED]
new_status = common.statusStrings[fixed_status]
archived_episode = archivedEp['showid']
            ep = episode_num(archivedEp['season'], archivedEp['episode'])
episode_id = archivedEp['episode_id']
location = archivedEp['location'] or 'unknown location'
result = ('NOT FOUND', 'EXISTS')[bool(existing)]
logger.info(_('Changing status from {old_status} to {new_status} for {archived_episode}: {ep} at {location} (File {result})'.format(
old_status=old_status, new_status=new_status, archived_episode=archived_episode, ep=ep, location=location, result=result
)))
self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [fixed_status, episode_id])
def fix_duplicate_shows(self, column='indexer_id'):
sql_results = self.connection.select(
f"SELECT show_id, {column}, COUNT({column}) as count FROM tv_shows GROUP BY {column} HAVING count > 1")
for cur_duplicate in sql_results:
logger.debug(_("Duplicate show detected! {column}: {dupe} count: {count}".format(column=column, dupe=cur_duplicate[column], count=cur_duplicate['count'])))
cur_dupe_results = self.connection.select(
"SELECT show_id, " + column + " FROM tv_shows WHERE " + column + " = ? LIMIT ?",
[cur_duplicate[column], int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
logger.info(_("Deleting duplicate show with {column}: {dupe} showid: {show}".format(column=column, dupe=cur_dupe_id[column], show=cur_dupe_id['show_id'])))
self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]])
def fix_duplicate_episodes(self):
sql_results = self.connection.select(
"SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid, season, episode HAVING count > 1")
for cur_duplicate in sql_results:
dupe_id = cur_duplicate["showid"]
dupe_season = cur_duplicate["season"]
            dupe_episode = cur_duplicate["episode"]
dupe_count = cur_duplicate["count"]
logger.debug(_("Duplicate episode detected! showid: {dupe_id} season: {dupe_season} episode {dupe_episode} count: {dupe_count}".format(
dupe_id=dupe_id, dupe_season=dupe_season, dupe_episode=dupe_episode, dupe_count=dupe_count))
)
cur_dupe_results = self.connection.select(
"SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?",
[cur_duplicate["showid"], cur_duplicate["season"], cur_duplicate["episode"],
int(cur_duplicate["count"]) - 1]
)
for cur_dupe_id in cur_dupe_results:
current_episode_id = cur_dupe_id["episode_id"]
logger.info(_("Deleting duplicate episode with episode_id: {current_episode_id}".format(current_episode_id=current_episode_id)))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [current_episode_id])
def fix_orphan_episodes(self):
sql_results = self.connection.select(
"SELECT episode_id, showid, tv_shows.indexer_id FROM tv_episodes "
"LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.indexer_id WHERE tv_shows.indexer_id is NULL")
for cur_orphan in sql_results:
current_episode_id = cur_orphan["episode_id"]
current_show_id = cur_orphan["showid"]
logger.debug(_("Orphan episode detected! episode_id: {current_episode_id} showid: {current_show_id}".format(
current_episode_id=current_episode_id, current_show_id=current_show_id))
)
logger.info(_("Deleting orphan episode with episode_id: {current_episode_id}".format(current_episode_id=current_episode_id)))
self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [current_episode_id])
def fix_missing_table_indexes(self):
if not self.connection.select("PRAGMA index_info('idx_indexer_id')"):
logger.info(_("Missing idx_indexer_id for TV Shows table detected!, fixing..."))
self.connection.action("CREATE UNIQUE INDEX idx_indexer_id ON tv_shows(indexer_id);")
if not self.connection.select("PRAGMA index_info('idx_tv_episodes_showid_airdate')"):
logger.info(_("Missing idx_tv_episodes_showid_airdate for TV Episodes table detected!, fixing..."))
self.connection.action("CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid, airdate);")
if not self.connection.select("PRAGMA index_info('idx_showid')"):
logger.info(_("Missing idx_showid for TV Episodes table detected!, fixing..."))
self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")
if not self.connection.select("PRAGMA index_info('idx_status')"):
logger.info(_("Missing idx_status for TV Episodes table detected!, fixing..."))
self.connection.action("CREATE INDEX idx_status ON tv_episodes (status, season, episode, airdate)")
if not self.connection.select("PRAGMA index_info('idx_sta_epi_air')"):
logger.info(_("Missing idx_sta_epi_air for TV Episodes table detected!, fixing..."))
self.connection.action("CREATE INDEX idx_sta_epi_air ON tv_episodes (status, episode, airdate)")
if not self.connection.select("PRAGMA index_info('idx_sta_epi_sta_air')"):
logger.info(_("Missing idx_sta_epi_sta_air for TV Episodes table detected!, fixing..."))
self.connection.action("CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season, episode, status, airdate)")
def fix_unaired_episodes(self):
current_date = datetime.date.today()
sql_results = self.connection.select(
"SELECT episode_id FROM tv_episodes WHERE (airdate > ? or airdate = 1) AND status in (?,?) AND season > 0",
[current_date.toordinal(), common.SKIPPED, common.WANTED])
for cur_unaired in sql_results:
current_episode_id = cur_unaired["episode_id"]
logger.info(_("Fixing unaired episode status for episode_id: {current_episode_id}".format(current_episode_id=current_episode_id)))
self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [common.UNAIRED, current_episode_id])
def fix_episode_statuses(self):
sql_results = self.connection.select("SELECT episode_id, showid FROM tv_episodes WHERE status IS NULL")
for cur_ep in sql_results:
current_episode_id = cur_ep["episode_id"]
current_show_id = cur_ep["showid"]
logger.debug(_("MALFORMED episode status detected! episode_id: {current_episode_id} showid: {current_show_id}".format(
current_episode_id=current_episode_id, current_show_id=current_show_id)))
logger.info(_("Fixing malformed episode status with episode_id: {current_episode_id}".format(current_episode_id=current_episode_id)))
self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [common.UNKNOWN, current_episode_id])
def fix_invalid_airdates(self):
sql_results = self.connection.select(
"SELECT episode_id, showid FROM tv_episodes WHERE airdate >= ? OR airdate < 1", [datetime.date.max.toordinal()])
for bad_airdate in sql_results:
current_episode_id = bad_airdate["episode_id"]
current_show_id = bad_airdate["showid"]
logger.debug(_("Bad episode airdate detected! episode_id: {current_episode_id} showid: {current_show_id}".format(
current_episode_id=current_episode_id, current_show_id=current_show_id)))
logger.info(_("Fixing bad episode airdate for episode_id: {current_episode_id}".format(current_episode_id=current_episode_id)))
self.connection.action("UPDATE tv_episodes SET airdate = '1' WHERE episode_id = ?", [current_episode_id])
def fix_show_nfo_lang(self):
self.connection.action("UPDATE tv_shows SET lang = '' WHERE lang = 0 or lang = '0'")
def backup_database(version):
logger.info("Backing up database before upgrade")
if not helpers.backupVersionedFile(db.db_full_path(), version):
logger.log_error_and_exit("Database backup failed, abort upgrading database")
else:
logger.info("Proceeding with upgrade")
# ======================
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
def test(self):
return self.has_table("db_version")
def execute(self):
if not self.has_table("tv_shows") and not self.has_table("db_version"):
queries = [
"CREATE TABLE db_version(db_version INTEGER, db_minor_version INTEGER);",
"CREATE TABLE history(action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT, version NUMERIC DEFAULT -1);",
"CREATE TABLE imdb_info(indexer_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC);",
"CREATE TABLE info(last_backlog NUMERIC, last_indexer NUMERIC, last_proper_search NUMERIC);",
"CREATE TABLE scene_numbering(indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER, scene_season INTEGER, scene_episode INTEGER, absolute_number NUMERIC, scene_absolute_number NUMERIC, PRIMARY KEY(indexer_id, season, episode));",
"CREATE TABLE tv_shows(show_id INTEGER PRIMARY KEY, indexer_id NUMERIC, indexer NUMERIC, show_name TEXT, location TEXT, network TEXT, genre TEXT, classification TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_indexer NUMERIC, dvdorder NUMERIC, archive_firstmatch NUMERIC, rls_require_words TEXT, rls_ignore_words TEXT, sports NUMERIC, anime NUMERIC, scene NUMERIC, default_ep_status NUMERIC DEFAULT -1, sub_use_sr_metadata NUMERIC DEFAULT 0);",
"CREATE TABLE tv_episodes(episode_id INTEGER PRIMARY KEY, showid NUMERIC, indexerid NUMERIC, indexer TEXT, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT, subtitles TEXT, subtitles_searchcount NUMERIC, subtitles_lastsearch TIMESTAMP, is_proper NUMERIC, scene_season NUMERIC, scene_episode NUMERIC, absolute_number NUMERIC, scene_absolute_number NUMERIC, version NUMERIC DEFAULT -1, release_group TEXT);",
"CREATE TABLE blacklist (show_id INTEGER, range TEXT, keyword TEXT);",
"CREATE TABLE whitelist (show_id INTEGER, range TEXT, keyword TEXT);",
"CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER);",
"CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC, PRIMARY KEY (indexer_id, indexer));",
"CREATE UNIQUE INDEX idx_indexer_id ON tv_shows(indexer_id);",
"CREATE INDEX idx_showid ON tv_episodes(showid);",
"CREATE INDEX idx_sta_epi_air ON tv_episodes(status, episode, airdate);",
"CREATE INDEX idx_sta_epi_sta_air ON tv_episodes(season, episode, status, airdate);",
"CREATE INDEX idx_status ON tv_episodes(status,season,episode,airdate);",
"CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid, airdate);",
"INSERT INTO db_version(db_version, db_minor_version) VALUES (44, 3);"
]
for query in queries:
self.connection.action(query)
else:
cur_db_version = self.get_db_version()
if cur_db_version < MIN_DB_VERSION:
logger.log_error_and_exit(_(
"Your database version ({cur_db_version}) is too old to migrate from what this version of SickChill supports ({MIN_DB_VERSION}).\nUpgrade using a previous version (tag) build 496 to build 501 of SickChill first or remove database file to begin fresh.".format(cur_db_version=cur_db_version, MIN_DB_VERSION=MIN_DB_VERSION)))
if cur_db_version > MAX_DB_VERSION:
logger.log_error_and_exit(_(
"Your database version ({cur_db_version}) has been incremented past what this version of SickChill supports ({MAX_DB_VERSION}).\nIf you have used other forks of SickChill, your database may be unusable due to their modifications.".format(cur_db_version=cur_db_version, MAX_DB_VERSION=MAX_DB_VERSION)))
class AddPreferWords(InitialSchema):
""" Adding column rls_prefer_words to tv_shows """
def test(self):
return self.has_column("tv_shows", "rls_prefer_words")
def execute(self):
backup_database(self.get_db_version())
logger.info("Adding column rls_prefer_words to tvshows")
self.add_column("tv_shows", "rls_prefer_words", "TEXT", "")
self.inc_minor_version()
logger.info('Updated to: {0:d}.{1:d}'.format(*self.connection.version))
class AddCustomNameToShow(AddPreferWords):
""" Adding column rls_prefer_words to tv_shows """
def test(self):
return self.has_column("tv_shows", "custom_name")
def execute(self):
backup_database(self.get_db_version())
logger.info("Adding column custom_name to tvshows")
self.add_column("tv_shows", "custom_name", "TEXT", "")
self.inc_minor_version()
logger.info('Updated to: {0:d}.{1:d}'.format(*self.connection.version))
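# Illustrative sketch only (not a real SickChill migration): a further upgrade
# would follow the same pattern, subclassing the previous migration as the
# comment above the migrations section instructs. The column name here is
# hypothetical.
class AddExampleColumn(AddCustomNameToShow):
    """ Adding column example_col to tv_shows (hypothetical example) """
    def test(self):
        return self.has_column("tv_shows", "example_col")
    def execute(self):
        backup_database(self.get_db_version())
        logger.info("Adding column example_col to tvshows")
        self.add_column("tv_shows", "example_col", "TEXT", "")
        self.inc_minor_version()
        logger.info('Updated to: {0:d}.{1:d}'.format(*self.connection.version))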
|
Vagab0nd/SiCKRAGE
|
sickchill/oldbeard/databases/main.py
|
Python
|
gpl-3.0
| 15,696
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys, string, traceback
import socket, asyncore
import logging, threading
class network(asyncore.dispatcher):
def __init__(self, iobuffer, nick, port):
logging_format = '%(asctime)-15s: %(message)s'
logging.basicConfig(filename='debug.log', format=logging_format, level=logging.DEBUG)
        self.iobuffer = iobuffer  # These are located in microirc now.
self.address = iobuffer[2]
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.address, port))
self.iobuffer[0].append('USER microirc +xi µirc: µirc')
self.iobuffer[0].append('NICK %s' % nick)
threading.Thread(target=asyncore.loop).start()
        logging.info('Connected to %s:%s.' % (self.address, str(port)))
return
# overwritten asyncore methods:
def handle_connect(self):
pass
def handle_close(self):
self.close()
def handle_read(self):
try: # This is the second one we try..
rcv = self.recv(8192).decode().split('\n')
for lmsg in rcv:
rmsg = lmsg.split(' ')
if len(rmsg) <= 1:
return
if rmsg[0] == 'PING':
self.iobuffer[0].append('PONG %s' % rmsg[1]) #Alpha2todo - Functionalize direct write.
self.iobuffer[1].append(lmsg.rstrip())
logging.debug('Received "%s" from server.' % lmsg.rstrip())
continue
return
        except Exception:
logging.debug(traceback.format_exc())
return
def writable(self):
return (len(self.iobuffer[0]) > 0)
def handle_write(self):
for i in self.iobuffer[0]:
logging.debug('Sent "%s" to server.' % (i.rstrip()))
            n = '%s\r\n' % i.rstrip()  # IRC lines are terminated with CRLF
self.send(n.encode('utf-8'))
self.iobuffer[0].pop(self.iobuffer[0].index(i))
return
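# Illustrative usage sketch (assumes the iobuffer layout used above: index 0 is
# the outgoing line queue, index 1 the incoming line queue, index 2 the server
# address; the hostname below is a placeholder):
#
# buf = [[], [], 'irc.example.net']
# net = network(buf, 'mynick', 6667)
# buf[0].append('JOIN #channel')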
|
nafac/microirc
|
microirc-0.1.1r34/network_old.py
|
Python
|
gpl-3.0
| 1,724
|
# Imports assumed from context: this file begins with the class definition, so
# the originals appear to be missing; Block/Input/Output are presumed to come
# from OpenNFB's own framework module and Qt from a PyQt4-style binding.
import numpy as np
from PyQt4 import QtCore, QtGui
from flow import Block, Input, Output
class Threshold(Block):
input = Input()
ratio = Output(input)
#passfail = Output(input)
average_period = .35
epoch = 13.0
# auto_mode = Bool(True)
# mode = Enum('increase', 'decrease', 'range')
auto_target = 0.8
# low_target = Float(0.90)
# high_target = Float(0.90)
def init(self, mode):
assert mode in ('increase', 'decrease')
self.mode = mode
epoch_samples = int(self.input.sample_rate * self.epoch)
self.gr_block.set_history(epoch_samples)
print ('Threshold set_history(%d)' % epoch_samples)
self.threshold = 1.0
self.high_threshold = 0.0
self.calc_cnt = 0
self.auto_mode = True
self.color = QtGui.QColor(self.input.color)
self.widget = Threshold.Widget(self)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.updateGUI)
self.timer.start(100)
self.current_passfail = False
self.current_value = 0.0
def updateGUI(self):
self.widget.update()
def general_work(self, input_items, output_items):
#print ('Threshold work', len(input_items[0]), output_items, input_items[0][0])
self.gr_block.consume_each(1)
avg_period_samples = int(self.average_period * self.input.sample_rate)
avg = sum(input_items[0][-avg_period_samples:]) / avg_period_samples
self.current_value = avg
#self.signal.append([avg])
#self.signal.process()
self.calc_cnt += 1
#self.calc_cnt = avg_period_samples
if self.auto_mode and self.calc_cnt >= avg_period_samples:
self.calc_cnt = 0
avg_period = input_items[0][-avg_period_samples:]
if self.mode == 'decrease':
self.threshold = np.percentile(input_items[0], 100 * self.auto_target)
elif self.mode == 'increase':
self.threshold = np.percentile(input_items[0], 100 - 100 * self.auto_target)
            else:  # 'range' mode; unreachable given the assert in init
                self.high_threshold = np.percentile(input_items[0], 100 * self.high_target)
                self.threshold = np.percentile(input_items[0], 100 - 100 * self.low_target)
success = False
if self.mode == 'decrease':
if avg < self.threshold:
success = True
elif self.mode == 'increase':
if avg > self.threshold:
success = True
else:
if avg > self.threshold and avg < self.high_threshold:
success = True
output_items[0][0] = avg / self.threshold
#self.output_items[1] = success
self.gr_block.produce(0, 1)
self.current_passfail = success
return 0
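    # Note on auto mode in general_work above: np.percentile sets the threshold
    # so that roughly auto_target (80%) of the samples in the history window
    # fall on the passing side -- below the threshold in 'decrease' mode,
    # above it in 'increase' mode.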
class Widget(QtGui.QWidget):
MAX = 25
def __init__(self, threshold):
QtGui.QWidget.__init__(self)
self.threshold = threshold
self.setMinimumSize(42, 23 * 5)
def paintEvent(self, event):
painter = QtGui.QPainter(self)
width = self.width()
height = self.height()
top, bottom = height * .1, height * .8
left, right = width * .1, width * .8
rect = QtCore.QRect(left, top, right, bottom)
painter.fillRect(rect, QtGui.QColor('black'))
#painter.setWindow(rect)
dist = bottom - top
relval = self.threshold.current_value / self.MAX
relval = min(1.0, relval)
reltop = (1.0 - relval) * bottom + top
relbottom = height * 0.9 - reltop
rect = QtCore.QRect(left, reltop, right, relbottom)
color = QtGui.QColor('green' if self.threshold.current_passfail else 'red')
painter.fillRect(rect, color)
thr_height = self.threshold.threshold / self.MAX
thr_top = (1.0 - thr_height) * bottom + top
rect = QtCore.QRect(left, thr_top, right, 2)
painter.fillRect(rect, QtGui.QColor('white'))
#painter.setBrush
|
strfry/OpenNFB
|
blocks/threshold.py
|
Python
|
gpl-3.0
| 4,067
|
"""
formatting.py
Contains functions for formatting and working with strings.
The licensing for this module isn't solid, because I started working on this module before I had a proper
system for tracking code licences. If your code is in this file and you have any queries, contact me by
email at <lukeroge@gmail.com>!
Maintainer:
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
License for final section (all code after the "DJANGO LICENCE" comment):
BSD license
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import copy
import html.entities
import re
import warnings
from html.parser import HTMLParser
from cloudbot.util.colors import strip_irc
# Constants
IRC_COLOR_RE = re.compile(r"(\x03(\d+,\d+|\d)|[\x0f\x02\x16\x1f])")
REPLACEMENTS = {
'a': 'ä',
'b': 'Б',
'c': 'ċ',
'd': 'đ',
'e': 'ë',
'f': 'ƒ',
'g': 'ġ',
'h': 'ħ',
'i': 'í',
'j': 'ĵ',
'k': 'ķ',
'l': 'ĺ',
'm': 'ṁ',
'n': 'ñ',
'o': 'ö',
'p': 'ρ',
'q': 'ʠ',
'r': 'ŗ',
's': 'š',
't': 'ţ',
'u': 'ü',
    'v': 'ṿ',  # assumed glyph; the original replacement character was lost
'w': 'ω',
'x': 'χ',
'y': 'ÿ',
'z': 'ź',
'A': 'Å',
'B': 'Β',
'C': 'Ç',
'D': 'Ď',
'E': 'Ē',
'F': 'Ḟ',
'G': 'Ġ',
'H': 'Ħ',
'I': 'Í',
'J': 'Ĵ',
'K': 'Ķ',
'L': 'Ĺ',
'M': 'Μ',
'N': 'Ν',
'O': 'Ö',
'P': 'Р',
'Q': 'Q',
'R': 'Ŗ',
'S': 'Š',
'T': 'Ţ',
'U': 'Ů',
'V': 'Ṿ',
'W': 'Ŵ',
'X': 'Χ',
'Y': 'Ỳ',
'Z': 'Ż'
}
# Classes
class HTMLTextExtractor(HTMLParser):
"""
Takes HTML and provides cleaned and stripped text.
"""
def __init__(self):
HTMLParser.__init__(self)
self.result = []
def handle_data(self, d):
self.result.append(d)
def handle_charref(self, number):
codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)
self.result.append(chr(codepoint))
def handle_entityref(self, name):
codepoint = html.entities.name2codepoint[name]
self.result.append(chr(codepoint))
def get_text(self):
return ''.join(self.result)
# Functions
def strip_html(to_strip):
"""
Takes HTML and returns cleaned and stripped text.
:rtype str
"""
s = HTMLTextExtractor()
s.feed(to_strip)
return s.get_text()
def munge(text, count=0):
"""
Replaces characters in a string with visually similar characters to avoid pinging users in IRC.
Count sets how many characters are replaced, defaulting to all characters.
:rtype str
"""
reps = 0
for n in range(len(text)):
rep = REPLACEMENTS.get(text[n])
if rep:
text = text[:n] + rep + text[n + 1:]
reps += 1
if reps == count:
break
return text
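# Example: munge("hello") -> 'ħëĺĺö', munge("hello", count=1) -> 'ħello'
# (the default count=0 replaces every character that has a substitute).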
def ireplace(text, old, new, count=None):
"""
A case-insensitive replace() clone. Return a copy of text with all occurrences of substring
old replaced by new. If the optional argument count is given, only the first count
occurrences are replaced.
"""
pattern = re.compile(re.escape(old), re.IGNORECASE)
if count:
return pattern.sub(new, text, count=count)
else:
return pattern.sub(new, text)
def multi_replace(text, word_dic):
"""
Takes a string and replace words that match a key in a dictionary with the associated value,
then returns the changed text
:rtype str
"""
rc = re.compile('|'.join(map(re.escape, word_dic)))
def translate(match):
return word_dic[match.group(0)]
return rc.sub(translate, text)
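# Example: multi_replace("cat dog", {"cat": "feline", "dog": "canine"})
# returns 'feline canine'.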
# compatibility
multiword_replace = multi_replace
def truncate_words(content, length=10, suffix='...'):
"""
Truncates a string after a certain number of words.
:rtype str
"""
split = content.split()
if len(split) <= length:
return " ".join(split[:length])
else:
return " ".join(split[:length]) + suffix
def truncate(content, length=100, suffix='...'):
"""
Truncates a string after a certain number of characters.
Function always tries to truncate on a word boundary.
:rtype str
"""
if len(content) <= length:
return content
else:
return content[:length].rsplit(' ', 1)[0] + suffix
# compatibility
truncate_str = truncate
strip_colors = strip_irc
def chunk_str(content, length=420):
"""
Chunks a string into smaller strings of given length. Returns chunks.
:rtype list
"""
def chunk(c, l):
while c:
out = (c + ' ')[:l].rsplit(' ', 1)[0]
c = c[len(out):].strip()
yield out
return list(chunk(content, length))
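# Example: chunk_str("the quick brown fox", 10) -> ['the quick', 'brown fox']
# (splitting happens on the last space that fits within each chunk).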
def pluralize(num=0, text=''):
"""
Takes a number and a string, and pluralizes that string using the number and combines the results.
:rtype: str
"""
warnings.warn(
"formatting.pluralize() is deprecated, please use one of the other formatting.pluralize_*() functions",
DeprecationWarning
)
return pluralize_suffix(num, text)
def pluralise(num=0, text=''):
"""
Takes a number and a string, and pluralizes that string using the number and combines the results.
:rtype: str
"""
warnings.warn(
"formatting.pluralise() is deprecated, please use one of the other formatting.pluralise_*() functions",
DeprecationWarning
)
return pluralise_suffix(num, text)
def pluralize_suffix(num=0, text='', suffix='s'):
"""
Takes a number and a string, and pluralizes that string using the number and combines the results.
:rtype: str
"""
return pluralize_select(num, text, text + suffix)
pluralise_suffix = pluralize_suffix
def pluralize_select(count, single, plural):
return "{:,} {}".format(count, single if count == 1 else plural)
pluralise_select = pluralize_select
def pluralize_auto(count, thing):
if thing.endswith(('s', 'ss', 'sh', 'ch', 'x', 'z')):
return pluralize_suffix(count, thing, 'es')
elif thing.endswith(('f', 'fe')):
return pluralize_select(count, thing, thing.rsplit('f', 1)[0] + 'ves')
elif thing.endswith('y') and thing[-2:-1].lower() not in "aeiou":
return pluralize_select(count, thing, thing[:-1] + 'ies')
elif thing.endswith('y') and thing[-2:-1].lower() in "aeiou":
return pluralize_suffix(count, thing)
elif thing.endswith('o'):
return pluralize_suffix(count, thing, 'es')
elif thing.endswith('us'):
return pluralize_select(count, thing, thing[:-2] + 'i')
elif thing.endswith('is'):
return pluralize_select(count, thing, thing[:-2] + 'es')
elif thing.endswith('on'):
return pluralize_select(count, thing, thing[:-2] + 'a')
else:
return pluralize_suffix(count, thing)
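# Examples: pluralize_auto(2, 'box') -> '2 boxes', pluralize_auto(3, 'wolf')
# -> '3 wolves', pluralize_auto(2, 'city') -> '2 cities',
# pluralize_auto(2, 'cactus') -> '2 cacti', pluralize_auto(1, 'cactus') -> '1 cactus'.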
pluralise_auto = pluralize_auto
def dict_format(args, formats):
"""
:type args: dict[unknown, unknown]
:type formats: list[str]
:rtype: str
"""
matches = {}
for f in formats:
try:
# Check if values can be mapped
m = f.format(**args)
# Insert match and number of matched values (max matched values if already in dict)
matches[m] = max([matches.get(m, 0), len(re.findall(r'({.*?\})', f))])
except Exception:
continue
    # Return the most complete match, ranked by number of values matched and then by match length, or None
try:
return max(matches.items(), key=lambda x: (x[1], len(x[0])))[0]
except Exception:
return None
# DJANGO LICENCE
split_re = re.compile(r"""((?:[^\s'"]*(?:(?:"(?:[^"\\]|\\.)*" | '(?:["""
r"""^'\\]|\\.)*')[^\s'"]*)+) | \S+)""", re.VERBOSE)
def smart_split(text):
"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
for bit in split_re.finditer(text):
yield bit.group(0)
def get_text_list(list_, last_word='or'):
"""
>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>> get_text_list(['a', 'b'], 'and')
'a and b'
>> get_text_list(['a'])
'a'
>> get_text_list([])
''
"""
if len(list_) == 0:
return ''
if len(list_) == 1:
return list_[0]
# Translators: This string is used as a separator between list elements
return "{}, {} {}".format(", ".join([i for i in list_][:-1]), last_word, list_[-1])
def gen_markdown_table(headers, rows):
"""
Generates a Markdown formatted table from the data
"""
rows = copy.copy(rows)
rows.insert(0, headers)
rotated = zip(*reversed(rows))
sizes = tuple(map(lambda l: max(max(map(len, l)), 3), rotated))
rows.insert(1, tuple(('-' * size) for size in sizes))
lines = [
"| {} |".format(' | '.join(cell.ljust(sizes[i]) for i, cell in enumerate(row)))
for row in rows
]
return '\n'.join(lines)
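# Example: gen_markdown_table(['name', 'id'], [['foo', '1'], ['bar', '22']])
# produces:
# | name | id  |
# | ---- | --- |
# | foo  | 1   |
# | bar  | 22  |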
|
valesi/CloudBot
|
cloudbot/util/formatting.py
|
Python
|
gpl-3.0
| 11,182
|
# Python test set -- built-in functions
import ast
import builtins
import collections
import io
import locale
import os
import pickle
import platform
import random
import sys
import traceback
import types
import unittest
import warnings
from operator import neg
from test.support import TESTFN, unlink, run_unittest, check_warnings
from test.support.script_helper import assert_python_ok
try:
import pty, signal
except ImportError:
pty = signal = None
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
def __bool__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
def filter_char(arg):
return ord(arg) > ord("d")
def map_char(arg):
return chr(ord(arg)+1)
class BuiltinTest(unittest.TestCase):
# Helper to check picklability
def check_iter_pickle(self, it, seq, proto):
itorg = it
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), seq)
#test the iterator after dropping one from it
it = pickle.loads(d)
try:
next(it)
except StopIteration:
return
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxsize-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# str
self.assertRaises(TypeError, abs, 'a')
# bool
self.assertEqual(abs(True), 1)
self.assertEqual(abs(False), 0)
# other
self.assertRaises(TypeError, abs)
self.assertRaises(TypeError, abs, None)
class AbsClass(object):
def __abs__(self):
return -5
self.assertEqual(abs(AbsClass()), -5)
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
self.assertEqual(all([0, TestFailingBool()]), False)# Short-circuit
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_ascii(self):
self.assertEqual(ascii(''), '\'\'')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(()), '()')
self.assertEqual(ascii([]), '[]')
self.assertEqual(ascii({}), '{}')
a = []
a.append(a)
self.assertEqual(ascii(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(ascii(a), '{0: {...}}')
# Advanced checks for unicode strings
def _check_uni(s):
self.assertEqual(ascii(s), repr(s))
_check_uni("'")
_check_uni('"')
_check_uni('"\'')
_check_uni('\0')
_check_uni('\r\n\t .')
# Unprintable non-ASCII characters
_check_uni('\x85')
_check_uni('\u1fff')
_check_uni('\U00012fff')
# Lone surrogates
_check_uni('\ud800')
_check_uni('\udfff')
# Issue #9804: surrogates should be joined even for printable
# wide characters (UCS-2 builds).
self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
# All together
s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
self.assertEqual(ascii(s),
r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
def test_neg(self):
x = -sys.maxsize-1
self.assertTrue(isinstance(x, int))
self.assertEqual(-x, sys.maxsize+1)
def test_callable(self):
self.assertTrue(callable(len))
self.assertFalse(callable("a"))
self.assertTrue(callable(callable))
self.assertTrue(callable(lambda x, y: x + y))
self.assertFalse(callable(__builtins__))
def f(): pass
self.assertTrue(callable(f))
class C1:
def meth(self): pass
self.assertTrue(callable(C1))
c = C1()
self.assertTrue(callable(c.meth))
self.assertFalse(callable(c))
# __call__ is looked up on the class, not the instance
c.__call__ = None
self.assertFalse(callable(c))
c.__call__ = lambda self: 0
self.assertFalse(callable(c))
del c.__call__
self.assertFalse(callable(c))
class C2(object):
def __call__(self): pass
c2 = C2()
self.assertTrue(callable(c2))
c2.__call__ = None
self.assertTrue(callable(c2))
class C3(C2): pass
c3 = C3()
self.assertTrue(callable(c3))
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 1<<24)
self.assertEqual(chr(sys.maxunicode),
str('\\U0010ffff'.encode("ascii"), 'unicode-escape'))
self.assertRaises(TypeError, chr)
self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
self.assertEqual(chr(0x00010000), "\U00010000")
self.assertEqual(chr(0x00010001), "\U00010001")
self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
self.assertEqual(chr(0x00100000), "\U00100000")
self.assertEqual(chr(0x00100001), "\U00100001")
self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
self.assertRaises(ValueError, chr, -1)
self.assertRaises(ValueError, chr, 0x00110000)
self.assertRaises((OverflowError, ValueError), chr, 2**32)
def test_cmp(self):
self.assertTrue(not hasattr(builtins, "cmp"))
def test_compile(self):
compile('print(1)\n', '', 'exec')
bom = b'\xef\xbb\xbf'
compile(bom + b'print(1)\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
compile(memoryview(b"text"), "name", "exec")
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
compile('print("\xe5")\n', '', 'exec')
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
# test the optimize argument
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
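# optimize=-1 inherits the interpreter's own -O level; 0 keeps asserts and
# docstrings, 1 strips asserts (__debug__ is False), 2 also strips docstrings.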
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for code in codeobjs:
ns = {}
exec(code, ns)
rv = ns['f']()
self.assertEqual(rv, (debugval, docstring))
def test_delattr(self):
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assertIn('local_var', dir())
# dir(module)
self.assertIn('exit', dir(sys))
# dir(module_with_invalid__dict__)
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assertIn("strip", dir(str))
self.assertNotIn("__mro__", dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assertIn("y", dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assertIn("__repr__", dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assertNotIn("__repr__", dir(f))
self.assertIn("bar", dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assertTrue(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__tuple)
class Foo(object):
def __dir__(self):
return ("b", "c", "a")
res = dir(Foo())
self.assertIsInstance(res, list)
self.assertTrue(res == ["a", "b", "c"])
# dir(obj__dir__not_sequence)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
# dir(traceback)
try:
raise IndexError
except:
self.assertEqual(len(dir(sys.exc_info()[2])), 4)
# test that object has a __dir__()
self.assertEqual(sorted([].__dir__()), dir([]))
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(-sys.maxsize-1, -1), (sys.maxsize+1, 0))
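# divmod(a, b) always satisfies a == q*b + r, with the remainder taking
# the sign of the divisor -- for ints and floats alike.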
for num, denom, exp_result in [ (3.25, 1.0, (3.0, 0.25)),
(-3.25, 1.0, (-4.0, 0.75)),
(3.25, -1.0, (-4.0, -0.75)),
(-3.25, -1.0, (3.0, -0.25))]:
result = divmod(num, denom)
self.assertAlmostEqual(result[0], exp_result[0])
self.assertAlmostEqual(result[1], exp_result[1])
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
bom = b'\xef\xbb\xbf'
self.assertEqual(eval(bom + b'a', globals, locals), 1)
self.assertEqual(eval('"\xe5"', globals), "\xe5")
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
class X:
def __getitem__(self, key):
raise ValueError
self.assertRaises(ValueError, eval, "foo", {}, X())
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_exec(self):
g = {}
exec('z = 1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 1})
exec('z = 1+1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 2})
g = {}
l = {}
with check_warnings():
warnings.filterwarnings("ignore", "global statement",
module="<string>")
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g:
del g['__builtins__']
if '__builtins__' in l:
del l['__builtins__']
self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
def test_exec_globals(self):
code = compile("print('Hello World!')", "", "exec")
# no builtin function
self.assertRaisesRegex(NameError, "name 'print' is not defined",
exec, code, {'__builtins__': {}})
# __builtins__ must be a mapping type
self.assertRaises(TypeError,
exec, code, {'__builtins__': 123})
# no __build_class__ function
code = compile("class A: pass", "", "exec")
self.assertRaisesRegex(NameError, "__build_class__ not found",
exec, code, {'__builtins__': {}})
class frozendict_error(Exception):
pass
class frozendict(dict):
def __setitem__(self, key, value):
raise frozendict_error("frozendict is readonly")
# read-only builtins
if isinstance(__builtins__, types.ModuleType):
frozen_builtins = frozendict(__builtins__.__dict__)
else:
frozen_builtins = frozendict(__builtins__)
code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, {'__builtins__': frozen_builtins})
# read-only globals
namespace = frozendict({})
code = compile("x=1", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, namespace)
def test_exec_redirected(self):
savestdout = sys.stdout
sys.stdout = None # anything that cannot flush()
try:
# Used to raise SystemError('error return without exception set')
exec('a')
except NameError:
pass
finally:
sys.stdout = savestdout
def test_filter(self):
self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
def badfunc():
pass
self.assertRaises(TypeError, list, filter(badfunc, range(5)))
# test bltinmodule.c::filtertuple()
self.assertEqual(list(filter(None, (1, 2))), [1, 2])
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
self.assertRaises(TypeError, list, filter(42, (1, 2)))
def test_filter_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f1 = filter(filter_char, "abcdeabcde")
f2 = filter(filter_char, "abcdeabcde")
self.check_iter_pickle(f1, list(f2), proto)
def test_getattr(self):
self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
# unicode surrogates are not encodable to the default encoding (utf8)
self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
def test_hasattr(self):
self.assertTrue(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
# Check that hasattr propagates all exceptions outside of
# AttributeError.
class A:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise ValueError
self.assertRaises(ValueError, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
self.assertEqual(hash('spam'), hash(b'spam'))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEqual(type(hash(X())), int)
class Z(int):
def __hash__(self):
return self
self.assertEqual(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(isinstance(c, C))
self.assertTrue(isinstance(d, C))
self.assertTrue(not isinstance(e, C))
self.assertTrue(not isinstance(c, D))
self.assertTrue(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(issubclass(D, C))
self.assertTrue(issubclass(C, C))
self.assertTrue(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
class InvalidLen:
def __len__(self):
return None
self.assertRaises(TypeError, len, InvalidLen())
class FloatLen:
def __len__(self):
return 4.5
self.assertRaises(TypeError, len, FloatLen())
class HugeLen:
def __len__(self):
return sys.maxsize + 1
self.assertRaises(OverflowError, len, HugeLen())
class NoLenMethod(object): pass
self.assertRaises(TypeError, len, NoLenMethod())
def test_map(self):
self.assertEqual(
list(map(lambda x: x*x, range(1,4))),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
list(map(plus, [1, 3, 7])),
[1, 3, 7]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2])),
[1+4, 3+9, 7+2]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
list(map(int, Squares(10))),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
self.assertEqual(
list(map(Max, Squares(3), Squares(2))),
[0, 1]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
class BadSeq:
def __iter__(self):
raise ValueError
yield None
self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
def test_map_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
m1 = map(map_char, "Is this the real life?")
m2 = map(map_char, "Is this the real life?")
self.check_iter_pickle(m1, list(m2), proto)
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2, 3.0), 3.0)
self.assertEqual(max(1, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3), 3)
self.assertRaises(TypeError, max)
self.assertRaises(TypeError, max, 42)
self.assertRaises(ValueError, max, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, max, BadSeq())
for stmt in (
"max(key=int)", # no args
"max(default=None)",
"max(1, 2, default=None)", # require container for default
"max(default=None, key=int)",
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
self.assertEqual(max((), default=None), None) # zero elem iterable
self.assertEqual(max((1,), default=None), 1) # one elem iterable
self.assertEqual(max((1,2), default=None), 2) # two elem iterable
self.assertEqual(max((), default=1, key=neg), 1)
self.assertEqual(max((1, 2), default=3, key=neg), 1)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2, 3.0), 1)
self.assertEqual(min(1, 2.0, 3), 1)
self.assertEqual(min(1.0, 2, 3), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
for stmt in (
"min(key=int)", # no args
"min(default=None)",
"min(1, 2, default=None)", # require container for default
"min(default=None, key=int)",
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
self.assertEqual(min((), default=None), None) # zero elem iterable
self.assertEqual(min((1,), default=None), 1) # one elem iterable
self.assertEqual(min((1,2), default=None), 1) # two elem iterable
self.assertEqual(min((), default=1, key=neg), 1)
self.assertEqual(min((1, 2), default=1, key=neg), 2)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEqual(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
self.addCleanup(unlink, TESTFN)
with fp:
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
with fp:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
def test_open_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that open() uses the current locale
# encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
self.write_testfile()
current_locale_encoding = locale.getpreferredencoding(False)
fp = open(TESTFN, 'w')
with fp:
self.assertEqual(fp.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_open_non_inheritable(self):
fileobj = open(__file__)
with fileobj:
self.assertFalse(os.get_inheritable(fileobj.fileno()))
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
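# Three-argument pow(x, y, z) is modular exponentiation and is only
# defined for integers; any float operand raises TypeError.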
for x in 2, 2.0:
for y in 10, 10.0:
for z in 1000, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertAlmostEqual(pow(-1, 0.5), 1j)
self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
self.assertRaises(ValueError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow)
def test_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), "1+1")
self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file for triggering
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = io.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = io.StringIO(" 'whitespace'")
self.assertEqual(input(), " 'whitespace'")
sys.stdin = io.StringIO()
self.assertRaises(EOFError, input)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
# test_int(): see test_int.py for tests of built-in function int().
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), int)
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check even / odd rounding behaviour
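# Python 3 rounds half-way cases to the nearest even integer
# ("banker's rounding"), so both 5.5 and 6.5 round to 6.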
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 6)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -6)
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), int)
self.assertEqual(type(round(-8, -1)), int)
self.assertEqual(type(round(-8, 0)), int)
self.assertEqual(type(round(-8, 1)), int)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound:
def __round__(self):
return 23
class TestNoRound:
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__round__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
# Some versions of glibc for alpha have a bug that affects
# float -> integer rounding (floor, ceil, rint, round) for
# values in the range [2**52, 2**53). See:
#
# http://sources.redhat.com/bugzilla/show_bug.cgi?id=5350
#
# We skip this test on Linux/alpha if it would fail.
linux_alpha = (platform.system().startswith('Linux') and
platform.machine().startswith('alpha'))
system_round_bug = round(5e15+1) != 5e15+1
@unittest.skipIf(linux_alpha and system_round_bug,
"test will fail; failure is probably due to a "
"buggy system round function")
def test_round_large(self):
# Issue #1869: integral floats should remain unchanged
self.assertEqual(round(5e15-1), 5e15-1)
self.assertEqual(round(5e15), 5e15)
self.assertEqual(round(5e15+1), 5e15+1)
self.assertEqual(round(5e15+2), 5e15+2)
self.assertEqual(round(5e15+3), 5e15+3)
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
# test_str(): see test_unicode.py and test_bytes.py for str() tests.
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, sum, values, bytearray(b''))
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
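# sum() must not mutate its start argument in place; the list passed as
# the start value has to come back unchanged.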
empty = []
sum(([x] for x in range(10)), empty)
self.assertEqual(empty, [])
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
class C_get_vars(object):
def getDict(self):
return {'a':2}
__dict__ = property(fget=getDict)
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
self.assertEqual(vars(self.C_get_vars()), {'a':2})
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(list(zip(a, b)), t)
b = [4, 5, 6]
self.assertEqual(list(zip(a, b)), t)
b = (4, 5, 6, 7)
self.assertEqual(list(zip(a, b)), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(list(zip(a, I())), t)
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
self.assertRaises(RuntimeError, zip, a, TestFailingIter())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
list(zip(SequenceWithoutALength(), range(2**30))),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_zip_pickle(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z1 = zip(a, b)
self.check_iter_pickle(z1, t, proto)
def test_format(self):
# Test the basic machinery of the format() builtin. Don't test
# the specifics of the various formatters
self.assertEqual(format(3, ''), '3')
# Returns some classes to use for various tests. Only the new-style
# version remains now that old-style classes are gone in Python 3.
def classes_new():
class A(object):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple(object): pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
self.assertEqual(format(A(3), 'spec'), '3spec')
self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
'10abcdef')
class_test(*classes_new())
def empty_format_spec(value):
# test that:
# format(x, '') == str(x)
# format(x) == str(x)
self.assertEqual(format(value, ""), str(value))
self.assertEqual(format(value), str(value))
# for builtin types, format(x, "") == str(x)
empty_format_spec(17**13)
empty_format_spec(1.0)
empty_format_spec(3.1415e104)
empty_format_spec(-3.1415e104)
empty_format_spec(3.1415e-104)
empty_format_spec(-3.1415e-104)
empty_format_spec(object)
empty_format_spec(None)
# TypeError because self.__format__ returns the wrong type
class BadFormatResult:
def __format__(self, format_spec):
return 1.0
self.assertRaises(TypeError, format, BadFormatResult(), "")
# TypeError because format_spec is not unicode or str
self.assertRaises(TypeError, format, object(), 4)
self.assertRaises(TypeError, format, object(), object())
# tests for object.__format__ really belong elsewhere, but
# there's no good place to put them
x = object().__format__('')
self.assertTrue(x.startswith('<object object at'))
# first argument to object.__format__ must be string
self.assertRaises(TypeError, object().__format__, 3)
self.assertRaises(TypeError, object().__format__, object())
self.assertRaises(TypeError, object().__format__, None)
# --------------------------------------------------------------------
# Issue #7994: object.__format__ with a non-empty format string is
# deprecated
def test_deprecated_format_string(obj, fmt_str, should_raise):
if should_raise:
self.assertRaises(TypeError, format, obj, fmt_str)
else:
format(obj, fmt_str)
fmt_strs = ['', 's']
class A:
def __format__(self, fmt_str):
return format('', fmt_str)
for fmt_str in fmt_strs:
test_deprecated_format_string(A(), fmt_str, False)
class B:
pass
class C(object):
pass
for cls in [object, B, C]:
for fmt_str in fmt_strs:
test_deprecated_format_string(cls(), fmt_str, len(fmt_str) != 0)
# --------------------------------------------------------------------
# make sure we can take a subclass of str as a format spec
class DerivedFromStr(str): pass
self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
def test_construct_singletons(self):
for const in None, Ellipsis, NotImplemented:
tp = type(const)
self.assertIs(tp(), const)
self.assertRaises(TypeError, tp, 1, 2)
self.assertRaises(TypeError, tp, a=1, b=2)
@unittest.skipUnless(pty, "the pty and signal modules must be available")
class PtyTests(unittest.TestCase):
"""Tests that use a pseudo terminal to guarantee stdin and stdout are
terminals in the test environment"""
def run_child(self, child, terminal_input):
r, w = os.pipe() # Pipe test results from child back to parent
try:
pid, fd = pty.fork()
except (OSError, AttributeError) as e:
os.close(r)
os.close(w)
self.skipTest("pty.fork() raised {}".format(e))
raise
if pid == 0:
# Child
try:
# Make sure we don't get stuck if there's a problem
signal.alarm(2)
os.close(r)
with open(w, "w") as wpipe:
child(wpipe)
except:
traceback.print_exc()
finally:
# We don't want to return to unittest...
os._exit(0)
# Parent
os.close(w)
os.write(fd, terminal_input)
# Get results from the pipe
with open(r, "r") as rpipe:
lines = []
while True:
line = rpipe.readline().strip()
if line == "":
# The other end was closed => the child exited
break
lines.append(line)
# Check that a result came back and that it matches the user's terminal input
if len(lines) != 2:
# Something went wrong, try to get at stderr
# Beware of Linux raising EIO when the slave is closed
child_output = bytearray()
while True:
try:
chunk = os.read(fd, 3000)
except OSError: # Assume EIO
break
if not chunk:
break
child_output.extend(chunk)
os.close(fd)
child_output = child_output.decode("ascii", "ignore")
self.fail("got %d lines in pipe but expected 2, child output was:\n%s"
% (len(lines), child_output))
os.close(fd)
return lines
def check_input_tty(self, prompt, terminal_input, stdio_encoding=None):
if not sys.stdin.isatty() or not sys.stdout.isatty():
self.skipTest("stdin and stdout must be ttys")
def child(wpipe):
# Check the error handlers are accounted for
if stdio_encoding:
sys.stdin = io.TextIOWrapper(sys.stdin.detach(),
encoding=stdio_encoding,
errors='surrogateescape')
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
encoding=stdio_encoding,
errors='replace')
print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe)
print(ascii(input(prompt)), file=wpipe)
lines = self.run_child(child, terminal_input + b"\r\n")
# Check we did exercise the GNU readline path
self.assertIn(lines[0], {'tty = True', 'tty = False'})
if lines[0] != 'tty = True':
self.skipTest("standard IO in should have been a tty")
input_result = eval(lines[1]) # ascii() -> eval() roundtrip
if stdio_encoding:
expected = terminal_input.decode(stdio_encoding, 'surrogateescape')
else:
expected = terminal_input.decode(sys.stdin.encoding) # what else?
self.assertEqual(input_result, expected)
def test_input_tty(self):
# Test input() functionality when wired to a tty (the code path
# is different and invokes GNU readline if available).
self.check_input_tty("prompt", b"quux")
def test_input_tty_non_ascii(self):
# Check stdin/stdout encoding is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "utf-8")
def test_input_tty_non_ascii_unicode_errors(self):
# Check stdin/stdout error handler is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "ascii")
def test_input_no_stdout_fileno(self):
# Issue #24402: If stdin is the original terminal but stdout.fileno()
# fails, do not use the original stdout file descriptor
def child(wpipe):
print("stdin.isatty():", sys.stdin.isatty(), file=wpipe)
sys.stdout = io.StringIO() # Does not support fileno()
input("prompt")
print("captured:", ascii(sys.stdout.getvalue()), file=wpipe)
lines = self.run_child(child, b"quux\r")
expected = (
"stdin.isatty(): True",
"captured: 'prompt'",
)
self.assertSequenceEqual(lines, expected)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = list(range(100))
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple, str]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(set(s)) # unique letters only
types = [str, set, frozenset, list, tuple, dict.fromkeys]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
class ShutdownTest(unittest.TestCase):
def test_cleanup(self):
# Issue #19255: builtins are still available at shutdown
code = """if 1:
import builtins
import sys
class C:
def __del__(self):
print("before")
# Check that builtins still exist
len(())
print("after")
c = C()
# Make this module survive until builtins and sys are cleaned
builtins.here = sys.modules[__name__]
sys.here = sys.modules[__name__]
# Create a reference loop so that this module needs to go
# through a GC phase.
here = sys.modules[__name__]
"""
# Issue #20599: Force ASCII encoding to get a codec implemented in C,
# otherwise the codec may be unloaded before C.__del__() is called, and
# so print("before") fails because the codec cannot be used to encode
# "before" to sys.stdout.encoding. For example, on Windows,
# sys.stdout.encoding is the OEM code page and these code pages are
# implemented in Python
rc, out, err = assert_python_ok("-c", code,
PYTHONIOENCODING="ascii")
self.assertEqual(["before", "after"], out.decode().splitlines())
def load_tests(loader, tests, pattern):
from doctest import DocTestSuite
tests.addTest(DocTestSuite(builtins))
return tests
if __name__ == "__main__":
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_builtin.py
|
Python
|
gpl-3.0
| 60,127
|
# -*- coding: utf-8 -*- # NOQA
from service_framework.a_plugin import RestHandler as abstract_plugin # NOQA
class Service(abstract_plugin):
def get(self):
context = self.get_basic_context()
self.render("html/about.html", context=context)
config = {"service_name": "about",
"handler": Service,
"service_type": "rest",
"service_category": "ui_module",
}
|
Romeren/Master
|
services/miscellanceous/ui_modules/about/restserver.py
|
Python
|
gpl-3.0
| 418
|
from atmospherics import Atmosphere
class Resource():
def __init__(self, name, peak_capacity, storage_capacity, obj=None):
self.name=name
self.available=0
self.previously_available=0
# maximum amount allowed, or bad things happen (fuses blow, pipes burst)
self.peak_capacity = peak_capacity
self.storage_capacity = storage_capacity
self.obj = obj # optional pointer to a related object (e.g. Atmosphere)
def merge(self,other):
if self.name != other.name: return None
self.available += other.available
if self.obj and other.obj:
self.obj = self.obj.merge(other.obj)
def update(self,dt):
if self.name == "Electricity":
#print self.available/dt, self.previously_available
frac = (dt/300.0)
self.previously_available *= 1-frac
self.previously_available += frac*self.available/dt
self.available = 0 # start the next tick from an empty instantaneous pool
def draw(self,amt):
if self.name == "Electricity":
if self.previously_available < -0.9*self.peak_capacity: return 0
else:
if self.available < amt: return 0
self.available -= amt
return amt
def status(self):
return ' '.join([self.name,str(self.available),str(self.previously_available),str(self.storage_capacity)])
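# A minimal usage sketch (hypothetical values, not part of the game loop):
#   power = Resource('Electricity', peak_capacity=10, storage_capacity=0)
#   power.available += 5     # supply credited this tick
#   power.update(dt=1.0)     # fold into the running average, reset the pool
#   drawn = power.draw(2)    # returns 2 while the averaged load stays in limits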
class ResourceBundle():
def __init__(self):
self.contributors = 1
self.resources = { 'Electricity' : Resource('Electricity', 10, 0)}
def merge(self, new_resource):
for (k, v) in self.resources.items(): v.merge(new_resource[k])
self.contributors += 1
def grow(self):
"""Simple stretching of available resource space"""
for v in self.resources.values():
v.storage_capacity += v.storage_capacity/self.contributors
self.contributors += 1
def update(self,dt):
for k in self.resources:
self.resources[k].update(dt)
|
facepalm/bliss-station-game
|
src/module_resources.py
|
Python
|
gpl-3.0
| 2,149
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import bisect
import collections
try:
from collections import OrderedDict
except ImportError:
from ..utils.ordereddict import OrderedDict
import math
import six
from six.moves import xrange
import matplotlib.dates as mdates
import matplotlib.font_manager as mfontmgr
import matplotlib.legend as mlegend
import matplotlib.pyplot as mpyplot
import matplotlib.ticker as mticker
from .. import AutoInfoClass, MetaParams, TimeFrame
from .finance import plot_candlestick, plot_ohlc, plot_volume, plot_lineonclose
from .formatters import (MyVolFormatter, MyDateFormatter, getlocator)
from .scheme import PlotScheme
from .utils import tag_box_style
from .multicursor import MultiCursor
class PInfo(object):
def __init__(self, sch):
self.sch = sch
self.nrows = 0
self.row = 0
self.clock = None
self.x = None
self.xlen = 0
self.sharex = None
self.figs = list()
self.cursors = list()
self.daxis = OrderedDict()
self.ldaxis = list()
self.zorder = dict()
self.coloridx = collections.defaultdict(lambda: -1)
self.prop = mfontmgr.FontProperties(size=self.sch.subtxtsize)
def newfig(self, numfig):
fig = mpyplot.figure(numfig)
self.figs.append(fig)
self.daxis = OrderedDict()
self.ldaxis.append(self.daxis)
self.row = 0
self.sharex = None
return fig
def nextcolor(self, ax):
self.coloridx[ax] += 1
return self.coloridx[ax]
def color(self, ax):
return self.sch.color(self.coloridx[ax])
def zordernext(self, ax):
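# Step the z-order slightly so successive artists stack deterministically;
# with zdown newer artists land just below the current one, otherwise above.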
z = self.zorder[ax]
if self.sch.zdown:
return z * 0.9999
return z * 1.0001
def zordercur(self, ax):
return self.zorder[ax]
class Plot(six.with_metaclass(MetaParams, object)):
params = (('scheme', PlotScheme()),)
def __init__(self, **kwargs):
for pname, pvalue in kwargs.items():
setattr(self.p.scheme, pname, pvalue)
def drawtag(self, ax, x, y, facecolor, edgecolor, alpha=0.9, **kwargs):
txt = ax.text(x, y, '%.2f' % y, va='center', ha='left',
fontsize=self.pinf.sch.subtxtsize,
bbox=dict(boxstyle=tag_box_style,
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha),
# 3.0 is the minimum default for text
zorder=self.pinf.zorder[ax] + 3.0,
**kwargs)
def plot(self, strategy, numfigs=1):
if not strategy.datas:
return
self.pinf = PInfo(self.p.scheme)
self.sortdataindicators(strategy)
self.calcrows(strategy)
slen = len(strategy)
d, m = divmod(slen, numfigs)
pranges = list()
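# pranges collects one [start, end, size] entry per figure; the last
# figure absorbs the remainder of the division.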
for i in xrange(numfigs):
a = d * i
if i == (numfigs - 1):
d += m # add remainder to last stint
b = a + d
pranges.append([a, b, d])
for numfig in xrange(numfigs):
# prepare a figure
fig = self.pinf.newfig(numfig)
self.pinf.pstart, self.pinf.pend, self.pinf.psize = pranges[numfig]
self.pinf.xstart = self.pinf.pstart
self.pinf.xend = self.pinf.pend
self.pinf.clock = strategy._clock
self.pinf.xreal = strategy._clock.datetime.plot(
self.pinf.pstart, self.pinf.psize)
self.pinf.xlen = len(self.pinf.xreal)
self.pinf.x = list(xrange(self.pinf.xlen))
# Do the plotting
# Things that go always at the top (observers)
for ptop in self.dplotstop:
self.plotind(ptop, subinds=self.dplotsover[ptop])
# Create the rest on a per data basis
for data in strategy.datas:
for ind in self.dplotsup[data]:
self.plotind(
ind,
subinds=self.dplotsover[ind],
upinds=self.dplotsup[ind],
downinds=self.dplotsdown[ind])
self.plotdata(data, self.dplotsover[data])
for ind in self.dplotsdown[data]:
self.plotind(
ind,
subinds=self.dplotsover[ind],
upinds=self.dplotsup[ind],
downinds=self.dplotsdown[ind])
cursor = MultiCursor(
fig.canvas, list(self.pinf.daxis.values()),
useblit=True, horizOn=True, vertOn=True,
horizShared=True, vertShared=False,
horizMulti=True, vertMulti=False,
color='black', lw=1, ls=':')
self.pinf.cursors.append(cursor)
lastax = list(self.pinf.daxis.values())[-1]
# Date formatting for the x axis - only the last one needs it
if False:
locator = mticker.AutoLocator()
lastax.xaxis.set_major_locator(locator)
# lastax.xaxis.set_major_formatter(MyDateFormatter(self.pinf.xreal))
formatter = mdates.IndexDateFormatter(self.pinf.xreal,
fmt='%Y-%m-%d')
lastax.xaxis.set_major_formatter(formatter)
else:
self.setlocators(strategy._clock)
# Put the subplots as indicated by hspace
fig.subplots_adjust(hspace=self.pinf.sch.plotdist,
top=0.98, left=0.05, bottom=0.05, right=0.95)
# Applying fig.autofmt_xdate if the data axis is the last one
# breaks the presentation of the date labels. why?
# Applying the manual rotation with setp cures the problem
# but the labels from all axis but the last have to be hidden
if False:
fig.autofmt_xdate(bottom=0.25, rotation=0)
elif True:
for ax in self.pinf.daxis.values():
mpyplot.setp(ax.get_xticklabels(), visible=False)
# ax.autoscale_view(tight=True)
mpyplot.setp(lastax.get_xticklabels(),
visible=True,
rotation=self.pinf.sch.tickrotation)
# Things must be tight along the x axis (to fill both ends)
axtight = 'x' if not self.pinf.sch.ytight else 'both'
mpyplot.autoscale(enable=True, axis=axtight, tight=True)
def setlocators(self, data):
ax = list(self.pinf.daxis.values())[-1]
comp = getattr(data, '_compression', 1)
tframe = getattr(data, '_timeframe', TimeFrame.Days)
if tframe == TimeFrame.Years:
fmtmajor = '%Y'
fmtminor = '%Y'
fmtdata = '%Y'
elif tframe == TimeFrame.Months:
fmtmajor = '%Y'
fmtminor = '%b'
fmtdata = '%b'
elif tframe == TimeFrame.Weeks:
fmtmajor = '%b'
fmtminor = '%d'
fmtdata = '%d'
elif tframe == TimeFrame.Days:
fmtmajor = '%b'
fmtminor = '%d'
fmtdata = '%Y-%m-%d'
elif tframe == TimeFrame.Minutes:
fmtmajor = '%d %b'
fmtminor = '%H:%M'
fmtdata = '%Y-%m-%d %H:%M'
fordata = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtdata)
for dax in self.pinf.daxis.values():
dax.fmt_xdata = fordata
locmajor = mticker.AutoLocator()
locminor = mticker.AutoMinorLocator()
ax.xaxis.set_minor_locator(locminor)
ax.xaxis.set_major_locator(locmajor)
formajor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtmajor)
forminor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtminor)
ax.xaxis.set_minor_formatter(forminor)
ax.xaxis.set_major_formatter(formajor)
def calcrows(self, strategy):
# Calculate the total number of rows
rowsmajor = self.pinf.sch.rowsmajor
rowsminor = self.pinf.sch.rowsminor
nrows = 0
# Datas and volumes
nrows += len(strategy.datas) * rowsmajor
if self.pinf.sch.volume and not self.pinf.sch.voloverlay:
nrows += len(strategy.datas) * rowsminor
# top indicators/observers
nrows += len(self.dplotstop) * rowsminor
# indicators above datas
nrows += sum(len(v) for v in self.dplotsup.values())
nrows += sum(len(v) for v in self.dplotsdown.values())
self.pinf.nrows = nrows
def newaxis(self, obj, rowspan):
ax = mpyplot.subplot2grid((self.pinf.nrows, 1), (self.pinf.row, 0),
rowspan=rowspan, sharex=self.pinf.sharex)
# update the sharex information if not available
if self.pinf.sharex is None:
self.pinf.sharex = ax
# update the row index with the taken rows
self.pinf.row += rowspan
# save the mapping indicator - axis and return
self.pinf.daxis[obj] = ax
# Activate grid in all axes if requested
ax.yaxis.tick_right()
ax.grid(self.pinf.sch.grid, which='both')
return ax
def plotind(self, ind,
subinds=None, upinds=None, downinds=None,
masterax=None):
ind._plotinit()
sch = self.p.scheme
# check subind
subinds = subinds or []
upinds = upinds or []
downinds = downinds or []
# plot subindicators on self with independent axis above
for upind in upinds:
self.plotind(upind)
# Get an axis for this plot
ax = masterax or self.newaxis(ind, rowspan=self.pinf.sch.rowsminor)
indlabel = ind.plotlabel()
for lineidx in range(ind.size()):
line = ind.lines[lineidx]
linealias = ind.lines._getlinealias(lineidx)
lineplotinfo = getattr(ind.plotlines, '_%d' % lineidx, None)
if not lineplotinfo:
lineplotinfo = getattr(ind.plotlines, linealias, None)
if not lineplotinfo:
lineplotinfo = AutoInfoClass()
if lineplotinfo._get('_plotskip', False):
continue
# Legend label only when plotting 1st line
if masterax and not ind.plotinfo.plotlinelabels:
label = indlabel * (lineidx == 0) or '_nolegend'
else:
label = lineplotinfo._get('_name', '') or linealias
# plot data
lplot = line.plotrange(self.pinf.xstart, self.pinf.xend)
if not math.isnan(lplot[-1]):
label += ' %.2f' % lplot[-1]
plotkwargs = dict()
linekwargs = lineplotinfo._getkwargs(skip_=True)
if linekwargs.get('color', None) is None:
if not lineplotinfo._get('_samecolor', False):
self.pinf.nextcolor(ax)
plotkwargs['color'] = self.pinf.color(ax)
plotkwargs.update(dict(aa=True, label=label))
plotkwargs.update(**linekwargs)
if ax in self.pinf.zorder:
plotkwargs['zorder'] = self.pinf.zordernext(ax)
pltmethod = getattr(ax, lineplotinfo._get('_method', 'plot'))
plottedline = pltmethod(self.pinf.x, lplot, **plotkwargs)
try:
plottedline = plottedline[0]
except:
# Possibly a container of artists (when plotting bars)
pass
self.pinf.zorder[ax] = plottedline.get_zorder()
if not math.isnan(lplot[-1]):
# line has valid values, plot a tag for the last value
self.drawtag(ax, len(self.pinf.xreal), lplot[-1],
facecolor='white',
edgecolor=self.pinf.color(ax))
# plot subindicators that were created on self
for subind in subinds:
self.plotind(subind, subinds=self.dplotsover[subind], masterax=ax)
if not masterax:
# adjust margin if requested ... general or particular
ymargin = ind.plotinfo._get('plotymargin', 0.0)
ymargin = max(ymargin, self.pinf.sch.yadjust)
if ymargin:
ax.margins(y=ymargin)
# Set specific or generic ticks
yticks = ind.plotinfo._get('plotyticks', [])
if not yticks:
yticks = ind.plotinfo._get('plotyhlines', [])
if yticks:
ax.set_yticks(yticks)
else:
locator = mticker.MaxNLocator(nbins=4, prune='both')
ax.yaxis.set_major_locator(locator)
# Set specific hlines if asked to
hlines = ind.plotinfo._get('plothlines', [])
if not hlines:
hlines = ind.plotinfo._get('plotyhlines', [])
for hline in hlines:
ax.axhline(hline, color=self.pinf.sch.hlinescolor,
ls=self.pinf.sch.hlinesstyle,
lw=self.pinf.sch.hlineswidth)
if self.pinf.sch.legendind and \
ind.plotinfo._get('plotlegend', True):
handles, labels = ax.get_legend_handles_labels()
# Ensure that we have something to show
if labels:
# Legend done here to ensure it includes all plots
legend = ax.legend(loc=self.pinf.sch.legendindloc,
numpoints=1, frameon=False,
shadow=False, fancybox=False,
prop=self.pinf.prop)
legend.set_title(indlabel, prop=self.pinf.prop)
# hack: if a title is set, the legend uses a VBox for the labels
# whose alignment defaults to "center"
legend._legend_box.align = 'left'
# plot subindicators on self with independent axis below
for downind in downinds:
self.plotind(downind)
def plotvolume(self, data, opens, highs, lows, closes, volumes, label):
if self.pinf.sch.voloverlay:
rowspan = self.pinf.sch.rowsmajor
else:
rowspan = self.pinf.sch.rowsminor
ax = self.newaxis(data.volume, rowspan=rowspan)
if self.pinf.sch.voloverlay:
volalpha = self.pinf.sch.voltrans
else:
volalpha = 1.0
maxvol = volylim = max(volumes)
if maxvol:
# Plot the volume (no matter if as overlay or standalone)
vollabel = label
volplot, = plot_volume(ax, self.pinf.x, opens, closes, volumes,
colorup=self.pinf.sch.volup,
colordown=self.pinf.sch.voldown,
alpha=volalpha, label=vollabel)
nbins = 6
prune = 'both'
if self.pinf.sch.voloverlay:
# store for a potential plot over it
nbins = int(nbins / self.pinf.sch.volscaling)
prune = None
volylim /= self.pinf.sch.volscaling
ax.set_ylim(0, volylim, auto=True)
else:
# plot a legend
handles, labels = ax.get_legend_handles_labels()
if handles:
# Legend done here to ensure it includes all plots
legend = ax.legend(loc=self.pinf.sch.legendindloc,
numpoints=1, frameon=False,
shadow=False, fancybox=False,
prop=self.pinf.prop)
locator = mticker.MaxNLocator(nbins=nbins, prune=prune)
ax.yaxis.set_major_locator(locator)
ax.yaxis.set_major_formatter(MyVolFormatter(maxvol))
if not maxvol:
ax.set_yticks([])
return None
return volplot
def setxdata(self, data):
# only if this data has a master, do something
if data.mlen:
# this data has a master, get the real length of this data
self.pinf.xlen = len(data.mlen)
# find the starting point with regards to master start: pstart
self.pinf.xstart = bisect.bisect_left(
data.mlen, self.pinf.pstart)
# find the ending point with regards to master start: pend
self.pinf.xend = bisect.bisect_right(
data.mlen, self.pinf.pend)
# extract the Xs from the subdata
self.pinf.x = data.mlen[self.pinf.xstart:self.pinf.xend]
# rebase the Xs to the start of the main data point
self.pinf.x = [x - self.pinf.pstart for x in self.pinf.x]
def plotdata(self, data, indicators):
for ind in indicators:
upinds = self.dplotsup[ind]
for upind in upinds:
self.plotind(upind,
subinds=self.dplotsover[upind],
upinds=self.dplotsup[upind],
downinds=self.dplotsdown[upind])
# set the x axis data (if needed)
self.setxdata(data)
opens = data.open.plotrange(self.pinf.xstart, self.pinf.xend)
highs = data.high.plotrange(self.pinf.xstart, self.pinf.xend)
lows = data.low.plotrange(self.pinf.xstart, self.pinf.xend)
closes = data.close.plotrange(self.pinf.xstart, self.pinf.xend)
volumes = data.volume.plotrange(self.pinf.xstart, self.pinf.xend)
vollabel = 'Volume'
if self.pinf.sch.volume and self.pinf.sch.voloverlay:
volplot = self.plotvolume(
data, opens, highs, lows, closes, volumes, vollabel)
axvol = self.pinf.daxis[data.volume]
ax = axvol.twinx()
self.pinf.daxis[data] = ax
else:
ax = self.newaxis(data, rowspan=self.pinf.sch.rowsmajor)
datalabel = ''
dataname = ''
if hasattr(data, '_name') and data._name:
datalabel += data._name
if hasattr(data, '_compression') and \
hasattr(data, '_timeframe'):
tfname = TimeFrame.getname(data._timeframe, data._compression)
datalabel += ' (%d %s)' % (data._compression, tfname)
datalabel += ' O:%.2f H:%.2f L:%.2f C:%.2f' % \
(opens[-1], highs[-1], lows[-1], closes[-1])
if self.pinf.sch.style.startswith('line'):
plotted = plot_lineonclose(
ax, self.pinf.x, closes,
color=self.pinf.sch.loc, label=datalabel)
else:
if self.pinf.sch.style.startswith('candle'):
plotted = plot_candlestick(
ax, self.pinf.x, opens, highs, lows, closes,
colorup=self.pinf.sch.barup,
colordown=self.pinf.sch.bardown,
label=datalabel)
elif self.pinf.sch.style.startswith('bar') or True:
# final default option -- should be "else"
plotted = plot_ohlc(
ax, self.pinf.x, opens, highs, lows, closes,
colorup=self.pinf.sch.barup,
colordown=self.pinf.sch.bardown,
label=datalabel)
self.pinf.zorder[ax] = plotted[0].get_zorder()
# Code to place a label at the right hand side with the last value
self.drawtag(ax, len(self.pinf.xreal), closes[-1],
facecolor='white', edgecolor=self.pinf.sch.loc)
ax.yaxis.set_major_locator(mticker.MaxNLocator(prune='both'))
# make sure "over" indicators do not change our scale
ax.set_ylim(ax.get_ylim())
if self.pinf.sch.volume:
if not self.pinf.sch.voloverlay:
self.plotvolume(
data, opens, highs, lows, closes, volumes, vollabel)
else:
# Prepare overlay scaling/pushup or manage own axis
if self.pinf.sch.volpushup:
# push up overlaid axis by lowering the bottom limit
axbot, axtop = ax.get_ylim()
axbot *= (1.0 - self.pinf.sch.volpushup)
ax.set_ylim(axbot, axtop)
for ind in indicators:
self.plotind(ind, subinds=self.dplotsover[ind], masterax=ax)
handles, labels = ax.get_legend_handles_labels()
if handles:
# put data and volume legend entries in the 1st positions
# because they are "collections" they are considered after Line2D
# for the legend entries, which is not our desire
if self.pinf.sch.volume and self.pinf.sch.voloverlay:
if volplot:
# even if volume plot was requested, there may be no volume
labels.insert(0, vollabel)
handles.insert(0, volplot)
didx = labels.index(datalabel)
labels.insert(0, labels.pop(didx))
handles.insert(0, handles.pop(didx))
# feed handles/labels to legend to get right order
legend = ax.legend(handles, labels,
loc='upper left', frameon=False, shadow=False,
fancybox=False,
prop=self.pinf.prop, numpoints=1, ncol=1)
# hack: if a title is set, the legend has a Vbox for the labels,
# which defaults to "center" alignment
legend._legend_box.align = 'left'
for ind in indicators:
downinds = self.dplotsdown[ind]
for downind in downinds:
self.plotind(downind,
subinds=self.dplotsover[downind],
upinds=self.dplotsup[downind],
downinds=self.dplotsdown[downind])
def show(self):
mpyplot.show()
def sortdataindicators(self, strategy):
# These lists/dictionaries hold the subplots that go above each data
self.dplotstop = list()
self.dplotsup = collections.defaultdict(list)
self.dplotsdown = collections.defaultdict(list)
self.dplotsover = collections.defaultdict(list)
# Sort observers in the different lists/dictionaries
for x in strategy.getobservers():
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
if x.plotinfo.subplot:
self.dplotstop.append(x)
else:
key = getattr(x._clock, 'owner', x._clock)
self.dplotsover[key].append(x)
# Sort indicators in the different lists/dictionaries
for x in strategy.getindicators():
if not hasattr(x, 'plotinfo'):
# no plotting support - so far LineSingle derived classes
continue
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
# support LineSeriesStub which has "owner" to point to the data
key = getattr(x._clock, 'owner', x._clock)
if getattr(x.plotinfo, 'plotforce', False):
if key not in strategy.datas:
datas = strategy.datas
while True:
if key not in strategy.datas:
key = key._clock
else:
break
if x.plotinfo.subplot:
if x.plotinfo.plotabove:
self.dplotsup[key].append(x)
else:
self.dplotsdown[key].append(x)
else:
self.dplotsover[key].append(x)
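# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original plot.py): a minimal, runnable
# illustration of the bucketing done by sortdataindicators() above. Objects
# are routed into "above"/"below"/"overlay" buckets keyed by the clock they
# run on. The Ind class and the 'data0' string are simplified stand-ins for
# real backtrader indicators and data feeds.
import collections

class Ind:
    def __init__(self, clock, subplot, plotabove):
        self._clock = clock         # the data feed the indicator is clocked on
        self.subplot = subplot      # True -> own axis; False -> overlay on data
        self.plotabove = plotabove  # True -> axis placed above the data axis

dplotsup = collections.defaultdict(list)    # subplots above their data
dplotsdown = collections.defaultdict(list)  # subplots below their data
dplotsover = collections.defaultdict(list)  # overlays on the data axis

for ind in (Ind('data0', True, True), Ind('data0', True, False),
            Ind('data0', False, False)):
    if ind.subplot:
        (dplotsup if ind.plotabove else dplotsdown)[ind._clock].append(ind)
    else:
        dplotsover[ind._clock].append(ind)

assert [len(d['data0']) for d in (dplotsup, dplotsdown, dplotsover)] == [1, 1, 1]
# ---------------------------------------------------------------------------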
| nicoddemus/backtrader | backtrader/plot/plot.py | Python | gpl-3.0 | 24,950 |
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testmodule for the H5MD interface.
"""
import os
import sys
import unittest as ut
import numpy as np
import espressomd # pylint: disable=import-error
import h5py # h5py has to be imported *after* espressomd (MPI)
from espressomd.interactions import Virtual
npart = 26
class CommonTests(ut.TestCase):
"""
Class that holds common test methods.
"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
# Avoid setting particles outside of the main box; otherwise particle
# positions are folded in the core when writing out, and we cannot directly
# compare the positions in the dataset with where the particles were set. One
# would need to unfold the positions from the hdf5 file.
box_l = npart / 2.0
system.box_l = [box_l, box_l, box_l]
system.cell_system.skin = 0.4
system.time_step = 0.01
for i in range(npart):
system.part.add(id=i, pos=np.array([float(i),
float(i),
float(i)]),
v=np.array([1.0, 2.0, 3.0]), type=23)
if espressomd.has_features(['MASS']):
system.part[i].mass = 2.3
if espressomd.has_features(['EXTERNAL_FORCES']):
system.part[i].ext_force = [0.1, 0.2, 0.3]
vb = Virtual()
system.bonded_inter.add(vb)
for i in range(npart - 1):
system.part[i].add_bond((vb, i + 1))
system.integrator.run(steps=0)
@classmethod
def setUpClass(cls):
if os.path.isfile('test.h5'):
os.remove('test.h5')
cls.py_file = cls.py_pos = cls.py_vel = cls.py_f = cls.py_id = cls.py_img = None
def test_metadata(self):
"""Test if the H5MD metadata has been written properly."""
self.assertEqual(self.py_file['h5md'].attrs['version'][0], 1)
self.assertEqual(self.py_file['h5md'].attrs['version'][1], 1)
self.assertIn('creator', self.py_file['h5md'])
self.assertIn('name', self.py_file['h5md/creator'].attrs)
self.assertIn('version', self.py_file['h5md/creator'].attrs)
self.assertEqual(
self.py_file['h5md/creator'].attrs['name'][:], b'ESPResSo')
self.assertIn('author', self.py_file['h5md'])
self.assertIn('name', self.py_file['h5md/author'].attrs)
def test_pos(self):
"""Test if positions have been written properly."""
self.assertTrue(
np.allclose(
np.array(
[
(float(i) %
self.box_l, float(i) %
self.box_l, float(i) %
self.box_l) for i in range(npart)]), np.array(
[
x for (
_, x) in sorted(
zip(
self.py_id, self.py_pos))])))
def test_img(self):
"""Test if images have been written properly."""
images = np.append(np.zeros((int(npart / 2), 3)),
np.ones((int(npart / 2), 3)))
images = images.reshape(npart, 3)
self.assertTrue((np.allclose(np.array(
[x for (_, x) in sorted(zip(self.py_id, self.py_img))]), images)))
def test_vel(self):
"""Test if velocities have been written properly."""
self.assertTrue(np.allclose(
np.array([[1.0, 2.0, 3.0] for _ in range(npart)]),
np.array([x for (_, x) in sorted(zip(self.py_id, self.py_vel))])),
msg="Velocities not written correctly by H5md!")
@ut.skipIf(
not espressomd.has_features(
['EXTERNAL_FORCES']),
"EXTERNAL_FORCES not compiled in, can not check writing forces.")
def test_f(self):
"""Test if forces have been written properly."""
self.assertTrue(np.allclose(
np.array([[0.1, 0.2, 0.3] for _ in range(npart)]),
np.array([x for (_, x) in sorted(zip(self.py_id, self.py_f))])),
msg="Forces not written correctly by H5md!")
def test_bonds(self):
"""Test if bonds have been written properly."""
self.assertEqual(len(self.py_bonds), npart - 1)
for i in range(npart - 1):
bond = [x for x in self.py_bonds if x[0] == i][0]
self.assertEqual(bond[0], i + 0)
self.assertEqual(bond[1], i + 1)
@ut.skipIf(not espressomd.has_features(['H5MD']),
"H5MD not compiled in, can not check functionality.")
class H5mdTestOrdered(CommonTests):
"""
Test the core implementation of writing hdf5 files when written in order.
"""
@classmethod
def setUpClass(cls):
write_ordered = True
from espressomd.io.writer import h5md # pylint: disable=import-error
h5 = h5md.H5md(
filename="test.h5",
write_pos=True,
write_vel=True,
write_force=True,
write_species=True,
write_mass=True,
write_ordered=write_ordered)
h5.write()
h5.flush()
h5.close()
cls.py_file = h5py.File("test.h5", 'r')
cls.py_pos = cls.py_file['particles/atoms/position/value'][0]
cls.py_img = cls.py_file['particles/atoms/image/value'][0]
cls.py_vel = cls.py_file['particles/atoms/velocity/value'][0]
cls.py_f = cls.py_file['particles/atoms/force/value'][0]
cls.py_id = cls.py_file['particles/atoms/id/value'][0]
cls.py_bonds = cls.py_file['connectivity/atoms']
@classmethod
def tearDownClass(cls):
os.remove("test.h5")
def test_ids(self):
"""Test if ids have been written properly."""
self.assertTrue(np.allclose(
np.array(range(npart)),
self.py_id), msg="ids not correctly ordered and written by H5md!")
@ut.skipIf(not espressomd.has_features(['H5MD']),
"H5MD not compiled in, can not check functionality.")
class H5mdTestUnordered(CommonTests):
"""
Test the core implementation of writing hdf5 files when written out of order.
"""
@classmethod
def setUpClass(cls):
write_ordered = False
from espressomd.io.writer import h5md # pylint: disable=import-error
h5 = h5md.H5md(
filename="test.h5",
write_pos=True,
write_vel=True,
write_force=True,
write_species=True,
write_mass=True,
write_ordered=write_ordered)
h5.write()
h5.flush()
h5.close()
cls.py_file = h5py.File("test.h5", 'r')
cls.py_pos = cls.py_file['particles/atoms/position/value'][0]
cls.py_img = cls.py_file['particles/atoms/image/value'][0]
cls.py_vel = cls.py_file['particles/atoms/velocity/value'][0]
cls.py_f = cls.py_file['particles/atoms/force/value'][0]
cls.py_id = cls.py_file['particles/atoms/id/value'][0]
cls.py_bonds = cls.py_file['connectivity/atoms']
@classmethod
def tearDownClass(cls):
os.remove("test.h5")
if __name__ == "__main__":
suite = ut.TestSuite()
suite.addTests(ut.TestLoader().loadTestsFromTestCase(H5mdTestUnordered))
suite.addTests(ut.TestLoader().loadTestsFromTestCase(H5mdTestOrdered))
result = ut.TextTestRunner(verbosity=4).run(suite)
sys.exit(not result.wasSuccessful())
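# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test file): the tests above
# repeatedly use sorted(zip(ids, values)) to bring per-particle datasets that
# were written in arbitrary order back into id order before comparing.
# A minimal, self-contained demonstration:
import numpy as np

ids = np.array([2, 0, 1])                                    # unordered ids
pos = np.array([[2., 2., 2.], [0., 0., 0.], [1., 1., 1.]])   # matching rows
reordered = np.array([x for (_, x) in sorted(zip(ids, pos))])
assert np.allclose(reordered,
                   np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]))
# ---------------------------------------------------------------------------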
| hmenke/espresso | testsuite/python/h5md.py | Python | gpl-3.0 | 8,047 |
# Copyright 2009 - 2011 Burak Sezer <purak@hadronproject.org>
#
# This file is part of lpms
#
# lpms is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lpms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lpms. If not, see <http://www.gnu.org/licenses/>.
import lpms
from lpms import out
from lpms import constants as cst
# FIXME: This module must be rewritten. Object Oriented is nice.
class SyncronizeRepo(object):
def __init__(self):
self.data = None
self.remote = None
self._type = None
def read_conf_file(self):
with open(cst.repo_conf) as data:
self.data = data.read().split("\n")
def run(self, repo):
keyword = "["+repo+"]"
# import repo.conf
self.read_conf_file()
if keyword in self.data:
first = self.data.index(keyword)
for line in self.data[first+1:]:
if line.startswith("["):
continue
if self._type is None and line.startswith("type"):
self._type = line.split("@")[1].strip()
if self._type == 'local':
return
elif self.remote is None and line.startswith("remote"):
self.remote = line.split("@")[1].strip()
if self._type == "git":
from lpms.syncers import git as syncer
lpms.logger.info("synchronizing %s from %s" % (repo, self.remote))
out.notify("synchronizing %s from %s" % (out.color(repo, "green"), self.remote))
syncer.run(repo, self.remote)
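# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): run() above scans
# repo.conf for a "[repo]" section header and reads "key @ value" lines.
# A stand-alone version of that parsing, fed an in-memory list of lines;
# unlike the original (which skips header lines and keeps scanning), this
# sketch stops at the next section header.
def parse_repo_section(lines, repo):
    keyword = "[" + repo + "]"
    conf = {}
    if keyword not in lines:
        return conf
    for line in lines[lines.index(keyword) + 1:]:
        if line.startswith("["):
            break  # next section reached
        if "@" in line:
            key, value = line.split("@", 1)
            conf[key.strip()] = value.strip()
    return conf

sample = ["[main]", "type @ git", "remote @ git://example.org/main.git"]
assert parse_repo_section(sample, "main") == {
    "type": "git", "remote": "git://example.org/main.git"}
# ---------------------------------------------------------------------------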
| hadronproject/lpms | lpms/operations/sync.py | Python | gpl-3.0 | 2,054 |
# coding=utf-8
"""Provider code for BTN."""
from __future__ import unicode_literals
import logging
import socket
import time
import jsonrpclib
from medusa import (
app,
scene_exceptions,
tv,
)
from medusa.common import cpu_presets
from medusa.helper.common import episode_num
from medusa.indexers.indexer_config import INDEXER_TVDBV2
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from six import itervalues
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
# API docs:
# https://web.archive.org/web/20160316073644/http://btnapps.net/docs.php
# https://web.archive.org/web/20160425205926/http://btnapps.net/apigen/class-btnapi.html
class BTNProvider(TorrentProvider):
"""BTN Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(BTNProvider, self).__init__('BTN')
# Credentials
self.api_key = None
# URLs
self.url = 'https://broadcasthe.net'
self.urls = {
'base_url': 'https://api.broadcasthe.net',
}
# Proper Strings
self.proper_strings = []
# Miscellaneous Options
self.supports_absolute_numbering = True
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self, min_time=10)  # Only poll BTN every 10 minutes at most
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with {mode: search value}
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
if not self._check_auth():
return results
# Search Params
search_params = {
'age': '<=10800', # Results from the past 3 hours
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
if mode != 'RSS':
searches = self._search_params(ep_obj, mode)
else:
searches = [search_params]
for search_params in searches:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_params})
response = self._api_call(search_params)
if not response or response.get('results') == '0':
log.debug('No data returned from provider')
continue
results += self.parse(response.get('torrents', {}), mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
torrent_rows = itervalues(data)
for row in torrent_rows:
title, download_url = self._process_title_and_url(row)
if not all([title, download_url]):
continue
seeders = row.get('Seeders', 1)
leechers = row.get('Leechers', 0)
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
size = row.get('Size') or -1
pubdate_raw = row.get('Time')
pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
log.debug(
'Found result: {title} with {x} seeders'
' and {y} leechers',
{'title': title, 'x': seeders, 'y': leechers}
)
items.append(item)
return items
def _check_auth(self):
if not self.api_key:
log.warning('Missing API key. Check your settings')
return False
return True
@staticmethod
def _process_title_and_url(parsed_json):
"""Create the title base on properties.
Try to get the release name, if it doesn't exist make one up
from the properties obtained.
"""
title = parsed_json.get('ReleaseName')
if not title:
# If we don't have a release name we need to get creative
title = ''
if 'Series' in parsed_json:
title += parsed_json['Series']
if 'GroupName' in parsed_json:
title += '.' + parsed_json['GroupName']
if 'Resolution' in parsed_json:
title += '.' + parsed_json['Resolution']
if 'Source' in parsed_json:
title += '.' + parsed_json['Source']
if 'Codec' in parsed_json:
title += '.' + parsed_json['Codec']
if title:
title = title.replace(' ', '.')
url = parsed_json.get('DownloadURL').replace('\\/', '/')
return title, url
def _search_params(self, ep_obj, mode, season_numbering=None):
if not ep_obj:
return []
searches = []
season = 'Season' if mode == 'Season' else ''
air_by_date = ep_obj.series.air_by_date
sports = ep_obj.series.sports
if not season_numbering and (air_by_date or sports):
date_fmt = '%Y' if season else '%Y.%m.%d'
search_name = ep_obj.airdate.strftime(date_fmt)
else:
search_name = '{type} {number}'.format(
type=season,
number=ep_obj.season if season else episode_num(
ep_obj.season, ep_obj.episode
),
).strip()
params = {
'category': season or 'Episode',
'name': search_name,
}
# Search
if ep_obj.series.indexer == INDEXER_TVDBV2:
params['tvdb'] = self._get_tvdb_id()
searches.append(params)
else:
name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.series)
name_exceptions.add(ep_obj.series.name)
for name in name_exceptions:
# Search by name if we don't have tvdb id
params['series'] = name
searches.append(params)
# extend air by date searches to include season numbering
if air_by_date and not season_numbering:
searches.extend(
self._search_params(ep_obj, mode, season_numbering=True)
)
return searches
def _api_call(self, params=None, results_per_page=300, offset=0):
"""Call provider API."""
parsed_json = {}
try:
server = jsonrpclib.Server(self.urls['base_url'])
parsed_json = server.getTorrents(
self.api_key,
params or {},
int(results_per_page),
int(offset)
)
time.sleep(cpu_presets[app.CPU_PRESET])
except jsonrpclib.jsonrpc.ProtocolError as error:
message = error.args[0]
if message == (-32001, 'Invalid API Key'):
log.warning('Incorrect authentication credentials.')
elif message == (-32002, 'Call Limit Exceeded'):
log.warning('You have exceeded the limit of 150 calls per hour.')
elif isinstance(message, tuple) and message[1] in (524, ):
log.warning('Provider is currently unavailable. Error: {code} {text}',
{'code': message[1], 'text': message[2]})
else:
log.error('JSON-RPC protocol error while accessing provider. Error: {msg!r}',
{'msg': message})
except (socket.error, ValueError) as error:
log.warning('Error while accessing provider. Error: {msg!r}', {'msg': error})
return parsed_json
provider = BTNProvider()
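# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original provider): the fallback in
# _process_title_and_url() above concatenates whatever properties are present
# when the API returns no ReleaseName. A stand-alone version of that logic:
def build_title(parsed_json):
    title = parsed_json.get('ReleaseName', '')
    if not title:
        parts = [parsed_json[key] for key in
                 ('Series', 'GroupName', 'Resolution', 'Source', 'Codec')
                 if key in parsed_json]
        title = '.'.join(parts)
    return title.replace(' ', '.')

assert build_title({'Series': 'Show Name', 'Resolution': '1080p',
                    'Codec': 'x264'}) == 'Show.Name.1080p.x264'
# ---------------------------------------------------------------------------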
| fernandog/Medusa | medusa/providers/torrent/json/btn.py | Python | gpl-3.0 | 8,411 |
from leap.bitmask.config.providerconfig import ProviderConfig
from leap.bitmask.crypto.srpauth import SRPAuth
from leap.soledad.client import Soledad
from twisted.internet import reactor
import logging
logging.basicConfig(level=logging.DEBUG)
# EDIT THIS --------------------------------------------
user = u"USERNAME"
uuid = u"USERUUID"
_pass = u"USERPASS"
server_url = "https://soledad.server.example.org:2323"
# EDIT THIS --------------------------------------------
secrets_path = "/tmp/%s.secrets" % uuid
local_db_path = "/tmp/%s.soledad" % uuid
cert_file = "/tmp/cacert.pem"
provider_config = '/tmp/cdev.json'
provider = ProviderConfig()
provider.load(provider_config)
soledad = None
def printStuff(r):
print r
def printErr(err):
logging.exception(err.value)
def init_soledad(_):
token = srpauth.get_token()
print "token", token
global soledad
soledad = Soledad(uuid, _pass, secrets_path, local_db_path,
server_url, cert_file,
auth_token=token)
def getall(_):
d = soledad.get_all_docs()
return d
d1 = soledad.create_doc({"test": 42})
d1.addCallback(getall)
d1.addCallbacks(printStuff, printErr)
d2 = soledad.sync()
d2.addCallbacks(printStuff, printErr)
d2.addBoth(lambda r: reactor.stop())
srpauth = SRPAuth(provider)
d = srpauth.authenticate(user, _pass)
d.addCallbacks(init_soledad, printErr)
reactor.run()
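# ---------------------------------------------------------------------------
# Editor's note (not part of the original example): this script is driven by
# a Twisted deferred chain. srpauth.authenticate() fires init_soledad on
# success, which builds the Soledad instance; in the upstream example the
# create_doc/getall and sync calls run inside init_soledad once the instance
# exists (their indentation was lost in this copy; at module level soledad
# would still be None). The final addBoth stops the reactor whether the sync
# succeeded or failed.
# ---------------------------------------------------------------------------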
| leapcode/soledad | docs/client_examples/soledad_sync.py | Python | gpl-3.0 | 1,447 |
import os
import sys
import copy
import time
import datetime
import ctypes
from multiprocessing import Queue
from threading import Event, Lock, Thread
# Auto-detect whether we're running 32 or 64 bit, and decide which DLL to load.
if sys.maxsize > 2**32:
dll_name = "CEtAPIx64.dll"
print("Python Alea: detected 64-bit application; will use '{}'".format( \
dll_name))
else:
dll_name = "CEtAPI.dll"
print("Python Alea: detected 32-bit application; will use '{}'".format( \
dll_name))
# Auto-detect the current directory, and the place where CEtAPI.dll should be.
dir_path = os.path.dirname(os.path.abspath(__file__))
dll_path = os.path.join(dir_path, dll_name)
# Check whether the DLL exists.
if not os.path.isfile(dll_path):
print("WARNING: Could not find CEtAPI.dll in its expected location: '{}'".format(dll_path))
# Load the DLL.
# Available functions in C-wrapped API:
# etapi.Open
# etapi.Close
# etapi.Version
# etapi.IsOpen
# etapi.SetDataFile
# etapi.WriteMessageToDataFile
# etapi.StartRecording
# etapi.StopRecording
# etapi.PerformCalibration
# etapi.WaitForCalibrationResult
# etapi.ShowStatusWindow
# etapi.HideStatusWindow
# etapi.DataStreaming
# etapi.WaitForData
# etapi.ClearDataBuffer
# etapi.ExitServer
# New in IntelliGaze_SDK_v2.0.7370 (supported from PyAlea 0.0.9):
# etapi.RecordData
# etapi.SendTrigger
etapi = None
try:
print("Python Alea: loading '{}'".format(dll_name))
etapi = ctypes.windll.LoadLibrary(dll_path)
except:
print("WARNING: Failed to load '{}'! Alea functionality not available!".format(dll_name))
# Define what errors mean.
EtApiError = { \
0: "Function success", \
-1: "No acknowledge received", \
-2: "Function argument error", \
-3: "API was not opened", \
-4: "No response from eye tracking server", \
-5: "IP or port failure (could not connect to IP or port)", \
-6: "Failed to send command to eye tracker", \
-7: "Server could not successfully process the given command", \
-10: "Could not create Mutex and Events in Open", \
-100: "Could not connect to EtAPI", \
}
def check_result(et_api_error):
if et_api_error == 0:
return True
else:
return False
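# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): how a failing return
# code maps onto a human-readable message via the EtApiError table above.
_example_code = -4
if not check_result(_example_code):
    _msg = EtApiError.get(_example_code, "Unknown error")
    assert _msg == "No response from eye tracking server"
# ---------------------------------------------------------------------------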
# Define general data struct used in the EtAPI.
class CAleaData(ctypes.Structure):
_fields_ = [ \
# RAW DATA
# Timestamp for this data.
("rawDataTimeStamp", ctypes.c_long), \
# Weighted-average of both eyes' coordinates.
("intelliGazeX", ctypes.c_double), \
("intelliGazeY", ctypes.c_double), \
# LEFT EYE
# Gaze position is measured in pixels, and relates to the screen.
("gazePositionXLeftEye", ctypes.c_double), \
("gazePositionYLeftEye", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("gazePositionConfidenceLeftEye", ctypes.c_double), \
# Pupil size is measured in pixels, and a circular pupil is assumed.
("pupilDiameterLeftEye", ctypes.c_double), \
# RIGHT EYE
# Gaze position is measured in pixels, and relates to the screen.
("gazePositionXRightEye", ctypes.c_double), \
("gazePositionYRightEye", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("gazePositionConfidenceRightEye", ctypes.c_double), \
# Pupil size is measured in pixels, and a circular pupil is assumed.
("pupilDiameterRightEye", ctypes.c_double), \
# EVENT DATA
# Event ID.
("eventID", ctypes.c_int), \
# Timestamp for this data.
("eventDataTimeStamp", ctypes.c_long), \
# The duration of the event.
("duration", ctypes.c_long), \
# The position is measured in pixels (screen coordinates).
("positionX", ctypes.c_double), \
("positionY", ctypes.c_double), \
# The horizontal and vertical dispersion.
("dispersionX", ctypes.c_double), \
("dispersionY", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("confidence", ctypes.c_double), \
]
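# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): CAleaData is a plain
# ctypes struct, so its fields behave like attributes; this is how the
# streaming thread and AleaTracker.sample() below read incoming samples.
_demo = CAleaData()
_demo.intelliGazeX, _demo.intelliGazeY = 512.0, 384.0
_demo.pupilDiameterLeftEye = 30.5
assert (_demo.intelliGazeX, _demo.intelliGazeY) == (512.0, 384.0)
assert _demo.pupilDiameterRightEye == 0.0  # unset ctypes fields default to 0
# ---------------------------------------------------------------------------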
# Define C structs.
class CRawEye(ctypes.Structure):
_fields_ = [ \
# Position parameters are in millimeters relative to the camera.
("eyeballPosX", ctypes.c_double), \
("eyeballPosY", ctypes.c_double), \
("eyeballPosZ", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("eyeballConfidence", ctypes.c_double), \
# Gaze vectors are normalised components.
("gazeVectorX", ctypes.c_double), \
("gazeVectorY", ctypes.c_double), \
("gazeVectorZ", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("gazeVectorConfidence", ctypes.c_double), \
# Pupil size is measured in pixels, and a circular pupil is assumed.
("pupilDiameter", ctypes.c_double), \
# Gaze position is measured in pixels, and relates to the screen.
("gazePositionX", ctypes.c_double), \
("gazePositionY", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("gazePositionConfidence", ctypes.c_double), \
]
class CRawHead(ctypes.Structure):
_fields_ = [ \
# Head position is measured in millimeters, relative to the camera.
("headPosX", ctypes.c_double), \
("headPosY", ctypes.c_double), \
("headPosZ", ctypes.c_double), \
# Head orientation is measured in degrees.
("headYaw", ctypes.c_double), \
("headPitch", ctypes.c_double), \
("headRoll", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("headPoseConfidence", ctypes.c_double), \
# Head translation speed is measured in millimeter per second.
("headTranslationSpeedX", ctypes.c_double), \
("headTranslationSpeedY", ctypes.c_double), \
("headTranslationSpeedZ", ctypes.c_double), \
# Head rotation speed is measured in degrees per second.
("headRotationSpeedX", ctypes.c_double), \
("headRotationSpeedY", ctypes.c_double), \
("headRotationSpeedZ", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("headSpeedConfidence", ctypes.c_double), \
]
class CRawData(ctypes.Structure):
_fields_ = [ \
# Timestamp for this data.
("timeStamp", ctypes.c_int), \
# Independent left and right eye data.
("leftEye", CRawEye), \
("rightEye", CRawEye), \
# Head data.
("head", CRawHead), \
# Weighted-average of both eyes' coordinates.
("intelliGazeX", ctypes.c_double), \
("intelliGazeY", ctypes.c_double), \
]
class CFixation(ctypes.Structure):
_fields_ = [ \
# The timestamp marks the fixation start.
("timeStamp", ctypes.c_int), \
# The position is measured in pixels (screen coordinates).
("positionX", ctypes.c_double), \
("positionY", ctypes.c_double), \
# The duration of the fixation.
("duration", ctypes.c_double), \
# The horizontal and vertical dispersion.
("dispersionX", ctypes.c_double), \
("dispersionY", ctypes.c_double), \
# Confidence is in the range [0.0, 1.0]
("Confidence", ctypes.c_double), \
]
class CSaccade(ctypes.Structure):
_fields_ = [ \
# The timestamp marks the saccade start.
("timeStamp", ctypes.c_int), \
# The duration of the saccade.
("duration", ctypes.c_double), \
]
class CBlink(ctypes.Structure):
_fields_ = [ \
# The timestamp marks the blink start.
("timeStamp", ctypes.c_int), \
# The position is measured in pixels (screen coordinates), and
# reflects the last fixation coordinates.
("positionX", ctypes.c_double), \
("positionY", ctypes.c_double), \
# The duration of the blink.
("duration", ctypes.c_double), \
]
class CBlickfang(ctypes.Structure):
_fields_ = [ \
# Unique ID of this blickfang
("id", ctypes.c_int), \
# Coordinates of the upper left of the blickfang
("x", ctypes.c_int), \
("y", ctypes.c_int), \
# Width and height of the blickfang.
("w", ctypes.c_int), \
("h", ctypes.c_int), \
]
class CEyeGesture(ctypes.Structure):
_fields_ = [ \
# c_char_p is used for pointers to multi-character strings.
("Action", ctypes.c_char_p), \
]
class CServerCalibrationStatus(ctypes.Structure):
_fields_ = [ \
# The number of the point that is currently being calibrated (1-16)
("currentPoint", ctypes.c_int), \
# The position on screen (in pixels) of the current point.
("posX", ctypes.c_int), \
("posY", ctypes.c_int), \
# If True, the eye tracker is currently detecting a fixation. Usually
# this means you can start reducing the size of the fixation target,
# which signals that the point is currently being calibrated and might
# make the fixation closer to the target's centre.
("fixation", ctypes.c_bool), \
# If True, the eye tracker is about to skip the current point. Usually
# this is an indication that the target should be made obvious to the
# participant, for example by flashing or enlarging it.
("acceptWarning", ctypes.c_bool), \
]
# Define a class that wraps the API in a user-friendly way.
class AleaTracker:
"""Python class for talking to Alea Technologies eye trackers"""
def __init__(self, app_key, file_path="alea_default.csv", target_ip=None, \
target_port=None, listen_ip=None, listen_port=None, alea_logging=True, \
debug=False):
"""
desc:
Initialises the API, opens the socket connection, establishes a
link to the hardware. Make sure that no firewall is blocking the
port. This function will use the default values for IP addresses
and ports, unless all keyword arguments are specified. This also
sets a callback function for raw data.
arguments:
app_key:
desc:
Application-specific key to register the application, ask
alea technologies for your specific application key.
type: str
keywords:
file_path:
desc:
Full path to where the data should be stored. (Default =
"alea_default.csv")
type: str
target_ip:
desc:
IP address of the eye tracker. (Default = "127.0.0.1")
type: str
target_port:
desc:
Target port of the server socket link. (Default = 27412)
type: int
listen_ip:
desc:
IP address of the client application. (Default =
"127.0.0.1")
type: str
listen_port:
desc:
Listen port of the client socket link. (Default = 27413)
type: int
debug:
desc:
In DEBUG mode, some info will be written to a text file.
(Default = False)
type: bool
"""
# Open a new debug file if required.
if debug:
self._debug = True
self._debug_file_name = "pygaze_alea_debug_{}.txt".format( \
time.strftime("%Y-%m-%d_%H-%M-%S"))
with open(self._debug_file_name, "w") as f:
f.write("time\tmessage")
self._debug_file_lock = Lock()
else:
self._debug = False
self._debug_file_name = None
self._debug_file_lock = None
# Initialise a new Alea API instance.
if self._debug:
self._debug_log("Initialising AleaAPI instance")
self.api = AleaAPI()
# Open the connection to the API.
if self._debug:
self._debug_log("Alea connection: opening with target IP={} port={}, listen IP={} port={}".format( \
target_ip, target_port, listen_ip, listen_port))
self.api.Open(app_key, targetIP=target_ip, targetPort=target_port, \
listenIP=listen_ip, listenPort=listen_port)
# Set an event that signals we are currently connected. This event
# will be used to signal to Threads that they should stop what they
# are doing.
self._connected = Event()
self._connected.set()
# Get the version and device number, which are formatted
# "%d.%d.%d.%d" % (major, minor, build, device)
version = self.api.Version()
major, minor, build, device = version.split(".")
self.api_version = "{}.{}.{}".format(major, minor, build)
self.device = "device_code_{}".format(device)
if device == "0":
self.device = "IG30"
elif device == "1":
self.device = "IG15"
# Print a message to the terminal.
print("Successfully connected to Alea API, version={}, device={}".format( \
self.api_version, self.device))
if self._debug:
self._debug_log("Successfully connected to Alea API, version={}, device={}".format( \
self.api_version, self.device))
# LOGGING
# Set the alea_logging property.
self._alea_logging = alea_logging
# Use the Alea API functions to log.
if self._alea_logging:
pass
# Use the PyAlea functions to log.
else:
# Parse the file path to find out what separator to use.
dir_name = os.path.dirname(file_path)
file_name = os.path.basename(file_path)
name, ext = os.path.splitext(file_name)
# If no file extension was included, default to TSV.
if ext == "":
ext = ".tsv"
# Choose a separator depending on the file extension.
if ext == ".csv":
self._sep = ","
elif ext == ".tsv":
self._sep = "\t"
else:
self._sep = "\t"
# Construct the data file name.
self._data_file_path = os.path.join(dir_name, name+ext)
# Define the log-able variables.
self._log_vars = [ \
"rawDataTimeStamp", \
"intelliGazeX", \
"intelliGazeY", \
"gazePositionXLeftEye", \
"gazePositionYLeftEye", \
"gazePositionConfidenceLeftEye", \
"pupilDiameterLeftEye", \
"gazePositionXRightEye", \
"gazePositionYRightEye", \
"gazePositionConfidenceRightEye", \
"pupilDiameterRightEye", \
"eventID", \
"eventDataTimeStamp", \
"duration", \
"positionX", \
"positionY", \
"dispersionX", \
"dispersionY", \
"confidence", \
]
# Open a new log file.
if self._debug:
self._debug_log("Opening new log file '{}'".format( \
self._data_file_path))
self._log_file = open(self._data_file_path, "w")
# Write a header to the log.
header = ["TYPE"]
header.extend(self._log_vars)
self._log_file.write(self._sep.join(map(str, header)))
# Create a lock to prevent simultaneous access to the log file.
self._log_file_lock = Lock()
# Each log will be counted, and every N logs the file will be
# flushed. (This writes the data from the buffer to RAM and then
# to drive.)
self._log_counter = 0
self._log_consolidation_freq = 60
# LOGGING THREAD
# Initialise a new Queue to push samples through.
self._logging_queue = Queue()
# Set an event that signals when DATA IS BEING logged. This is set
# by the logging Thread to signal that the Queue is empty.
self._logging_queue_empty = Event()
self._logging_queue_empty.set()
# Set an event to signal when data SHOULD BE logged. This is set
# to signal to the logging Thread that it should log or not.
self._recording = Event()
self._recording.clear()
# Initialise the logging Thread, but only if we're using the PyAlea
# way of logging.
if not self._alea_logging:
self._logging_thread = Thread( \
target=self._log_samples,
name='PyGaze_AleaTracker_logging', \
args=[])
# STREAMING THREAD
# Create a placeholder for the most recent sample.
self._recent_sample = CAleaData()
# Create a Lock to prevent simultaneous access to the most recent
# sample by the streaming thread and the sample function.
self._recent_sample_lock = Lock()
# Initialise the streaming Thread.
self._streaming_thread = Thread( \
target=self._stream_samples,
name='PyGaze_AleaTracker_streaming', \
args=[])
# SAMPLE STREAMING
# Tell the API to start streaming samples from the tracker.
if self._debug:
self._debug_log("DataStreaming: starting")
self.api.DataStreaming(True)
# Start the logging Thread.
if not self._alea_logging:
if self._debug:
self._debug_log("Logging Thread: starting")
self._logging_thread.start()
# Start the streaming Thread.
if self._debug:
self._debug_log("Streaming Thread: starting")
self._streaming_thread.start()
def _debug_log(self, message):
if self._debug:
self._debug_file_lock.acquire()
with open(self._debug_file_name, "a") as f:
f.write("\n{}\t{}".format(datetime.datetime.now().strftime( \
"%Y-%m-%d_%H:%M:%S.%f")[:-3], message))
self._debug_file_lock.release()
def _flush_log_file(self):
# Wait until the log file lock is released.
self._log_file_lock.acquire()
# Internal buffer to RAM.
self._log_file.flush()
# RAM to disk.
os.fsync(self._log_file.fileno())
# Release the log file lock.
self._log_file_lock.release()
def _log_samples(self):
while self._connected.is_set():
# Check if the sample Queue is empty.
if self._logging_queue.empty():
# Signal to other Threads that the logging Queue is empty.
if not self._logging_queue_empty.is_set():
self._logging_queue_empty.set()
# Process samples from the Queue.
else:
# Signal to other Threads that the Queue isn't empty.
if self._logging_queue_empty.is_set():
self._logging_queue_empty.clear()
# Get the next object from the Queue.
sample = self._logging_queue.get()
# Log the message string and/or the sample.
if type(sample) in [tuple, list]:
self._write_tuple(sample)
elif type(sample) == CAleaData:
self._write_sample(sample)
else:
print("WARNING: Unrecognised object in log queue: '{}'".format( \
sample))
# Increment the log counter.
self._log_counter += 1
# Check if the log file needs to be consolidated.
if self._log_counter % self._log_consolidation_freq == 0:
# Consolidate the text file on the harddrive.
self._flush_log_file()
def _stream_samples(self):
while self._connected.is_set():
# Wait for the next sample, or until 100 milliseconds have passed.
sample = self.api.WaitForData(100)
# Check if there wasn't a timeout.
if sample is not None:
if self._debug:
self._debug_log("WaitForData: sample obtained with timestamp {}".format( \
sample.rawDataTimeStamp))
# Update the most recent sample.
self._recent_sample_lock.acquire()
self._recent_sample = copy.deepcopy(sample)
self._recent_sample_lock.release()
# Add the sample to the Queue, but only during recording.
if (not self._alea_logging) and self._recording.is_set():
self._logging_queue.put(sample)
else:
if self._debug:
self._debug_log("WaitForData: timeout")
def _write_sample(self, sample):
# Construct a list with the sample data.
line = ["DAT"]
for var in self._log_vars:
line.append(sample.__getattribute__(var))
# Log the sample to the log file.
self._log_file_lock.acquire()
self._log_file.write("\n" + self._sep.join(map(str, line)))
self._log_file_lock.release()
def _write_tuple(self, tup):
# Construct a list of values that need to be logged.
line = []
# Add the values that need to be logged. Usually this will be ("MSG",
# timestamp, message).
line.extend(tup)
# Pad the list so that it will be of equal length to the sample
# lines, which makes it easier to be read into a spreadsheet editor
# and by some read_csv functions.
line.extend([""] * (len(self._log_vars) - len(line) - 1))
# Log the line to the log file.
self._log_file_lock.acquire()
self._log_file.write("\n" + self._sep.join(map(str, line)))
self._log_file_lock.release()
def calibrate(self, n_points=9, location=0, randomise=True, \
randomize=None, slow=False, audio=True, eye=0, \
calibration_improvement=False, skip_bad_points=False, \
automatic=True, bgc=(127,127,127), fgc=(0,0,0), image=""):
"""
desc:
Performs an eye-tracker-controlled calibration: the tracker will
autonomously run through the calibration process, uncluding the
displaying of calibration points. The CalibrationDoneDelegate and
ResultCalibrationExCB callbacks will be called when the
calibration is finished or when an error occurs.
keywords:
n_points:
desc:
Number of points used in the calibration. Choose from 1, 5,
9, or 16. (Default = 9)
type: int
location:
desc:
Indication of where the calibration points should be
presented. Choose from 0 (Full, outer points are 5% off
the monitor edge), 1 (Center, outer points are 20% off
the monitor edge), 2 (Bottom, points are in the lower half
of the monitor), 3 (Horizontal, points are located in a
horizontal line), and 4 (Vertical, points are located in
a vertical line). (Default = 0)
type: int
randomise:
desc:
Set to True to allow the tracker to randomise the order in
which calibration points are shown. Some experienced users
have a tendency to anticipate where points will be shown,
and to produce a systematic calibration error by moving
their eyes to the next point too quickly. Shuffling the
points prevents this. (Default = True)
type: bool
randomize:
desc:
Same as randomise, but in US spelling. (Default = None)
type: bool
slow:
desc:
Set to True to allow the tracker to show big and slow
calibration targets. (Default = False)
type: bool
audio:
desc:
Set to True to allow the tracker to play a sound when the
point jumps to a new position. (Default = True)
type: bool
eye:
desc:
Determines what eyes to calibrate and what eyes to track.
Choose from 0 (calibrate both eyes), 1 (calibrate the left
eye and track both eyes, "right glass eye"), 2 (calibrate
the right eye and track both eyes, "left glass eye"), 3
(calibrate and track only the left eye, "right pirate
eye"), or 4 (calibrate and track only the right eye, "left
pirate eye"). (Default = 0)
type: int
calibration_improvement:
desc:
Set to True if outliers or skipped points from a previous
calibrations should be re-calibrated. Can only be done
when a previous calibration returned with an "Improvement"
suggestion! (Default = False)
type: bool
skip_bad_points:
desc:
When set to True, IntelliGaze will not get stuck at
uncalibratable points. It will skip them, and try to
complete the calibration without them. (Default = False)
type: bool
automatic:
desc:
Set to True to allow the tracker to detect fixations and
accept points automatically. (Default = True)
type: bool
bgc:
desc:
RGB value of the background colour. This should have a
similar brightness to the experiment or application that
this calibration will be used with. The format is a tuple
with 3 ints in the range 0-255, representing red, green,
and blue. For example, (0,0,0) is black, and (255,255,255)
is white. (Default = (127,127,127))
type: int
fgc:
desc:
RGB value of the calibration point colour. This should
have a good contrast to the background. The format is a
tuple with 3 ints in the range 0-255, representing red,
green, and blue. For example, (255,0,0) is pure red.
(Default = (0,0,0) )
type: tuple
image:
desc:
Leave empty for the default circle, or provide a path to
an image file to use that image as calibration target. Use
the string "ANIMATION:PARROT" to calibrate with an
animation. (Default = "")
type: str
"""
# Check inputs.
if n_points not in [1, 5, 9, 16]:
# Close the connection and raise an Exception.
self.api.Close()
raise Exception("User requested {} points, but only 1, 5, 9, or 16 are allowed.".format( \
n_points))
if type(location) == int:
if location not in [0, 1, 2, 3, 4]:
# Close the connection and raise an Exception.
self.api.Close()
raise Exception("User requested location {}, but only 0, 1, 2, 3 or 4 are allowed.".format( \
location))
elif type(location) == str:
location = location.lower()
if location not in ["full", "centre", "center", "bottom", "horizontal", "vertical"]:
# Close the connection and raise an Exception.
self.api.Close()
raise Exception('User requested location {}, but only "full", "centre", "center", "bottom", "horizontal", "vertical" are allowed.'.format( \
location))
if location == "full":
location = 0
elif location in ["centre", "center"]:
location = 1
elif location == "bottom":
location = 2
elif location == "horizontal":
location = 3
elif location == "vertical":
location = 4
else:
# Close the connection and raise an Exception.
self.api.Close()
raise Exception('User requested location "{}" (type={}), but only "full", "centre", "center", "bottom", "horizontal", "vertical" are allowed.'.format( \
location, type(location)))
if randomize is not None:
randomise = randomize
if eye not in [0, 1, 2, 3, 4]:
# Close the connection and raise an Exception.
self.api.Close()
raise Exception("User requested eye %d, but only 0, 1, 2, 3 or 4 are allowed." \
% (eye))
# Run the calibration.
if self._debug:
self._debug_log("PerformCalibration: n_points={}, location={}, randomise={}, slow={}, audio={}, automatic={}, bgc={}, fgc={}, image={}".format( \
n_points, location, randomise, slow, audio, automatic, bgc, fgc, image))
self.api.PerformCalibration(noPoints=n_points, location=location, \
randomizePoints=randomise, slowMode=slow, audioFeedback=audio, \
eye=eye, calibrationImprovement=calibration_improvement, \
skipBadPoints=skip_bad_points, autoCalibration=automatic, \
backColor=bgc, pointColor=fgc, \
imageName=image)
# Wait until the running calibration has finished.
if self._debug:
self._debug_log("WaitForCalibrationResult: waiting...")
status, improve = self.api.WaitForCalibrationResult()
if self._debug:
self._debug_log("WaitForCalibrationResult: status={}, improve={}".format( \
status, improve))
return status, improve
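# Editor's note (illustrative, hypothetical values): a typical call is
#   status, improve = tracker.calibrate(n_points=9, location="full",
#                                       bgc=(127, 127, 127), fgc=(0, 0, 0))
# If improve comes back truthy, the calibration can be refined by calling
# calibrate() again with calibration_improvement=True.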
def log(self, message):
"""
desc:
Logs a message to the data file. The message will be timestamped
with the most recently streamed sample.
arguments:
message:
desc:
The message to be logged to the data file.
type: string
"""
# Log a message to the Alea data file.
if self._alea_logging:
self.api.SendTrigger(message)
# Log a message in the PyAlea format.
else:
# Get current timestamp.
self._recent_sample_lock.acquire()
t = self._recent_sample.rawDataTimeStamp
self._recent_sample_lock.release()
# Construct a tuple, and add it to the queue.
self._logging_queue.put(("MSG", t, message))
def start_recording(self):
"""
desc:
Starts the streaming of data to the log file.
"""
if self._debug:
self._debug_log("Starting recording")
# Tell the Alea API to start recording.
if self._alea_logging:
self.api.RecordData(True)
# Set the recording event. If PyAlea logging is used, this will also
# signal to the sample processing Thread to queue samples for the
# logging Thread.
self._recording.set()
def stop_recording(self):
"""
desc:
Pauses the streaming of data to the log file.
"""
if self._debug:
self._debug_log("Stopping recording")
# Tell the Alea API to stop recording.
if self._alea_logging:
self.api.RecordData(False)
# Unset the recording event. If PyAlea logging is used, this will also
# signal to the sample processing Thread to stop queueing samples
# for the logging Thread.
self._recording.clear()
def sample(self):
"""
desc:
Returns the latest sample's timestamp, intelliGaze X and Y
coordinates, and average pupil size. This function does not block
and/or wait for a new sample to come in, but instead simply reads
the stored latest sample. This can be up to the inter-sample time
old. E.g., it will be up to 33 ms old at a 30 Hz sampling rate.
returns:
desc: A tuple that contains the timestamp, X, Y, and pupil size.
The format is (int, float, float, float).
type: tuple
"""
# Copy data from the most recent sample.
self._recent_sample_lock.acquire()
t = copy.copy(self._recent_sample.rawDataTimeStamp)
x = copy.copy(self._recent_sample.intelliGazeX)
y = copy.copy(self._recent_sample.intelliGazeY)
l_size = copy.copy(self._recent_sample.pupilDiameterLeftEye)
r_size = copy.copy(self._recent_sample.pupilDiameterRightEye)
self._recent_sample_lock.release()
# Compute the pupil size.
if (l_size > 0) and (r_size > 0):
p_size = (l_size + r_size) / 2.0
elif l_size > 0:
p_size = l_size
elif r_size > 0:
p_size = r_size
else:
p_size = 0.0
return (t, x, y, p_size)
def close(self):
"""
desc:
Closes the connection to the eye tracker. This will also
automatically stop recording and close the data file. Note that
this operation will block until all queued data is stored.
"""
# Stop streaming samples from the API.
if self._debug:
self._debug_log("DataStreaming: turning off data streaming")
self.api.DataStreaming(False)
# Stop recording if it is still ongoing.
if self._recording.is_set():
self.stop_recording()
# Wait until the Queue is empty, or until 60 seconds have passed.
if not self._alea_logging:
if self._debug:
self._debug_log("Waiting for the logging Queue to empty")
queue_empty = self._logging_queue_empty.wait(timeout=60.0)
if not queue_empty:
print("WARNING: Logging Thread timeout occurred; something might have gone wrong!")
if self._debug:
self._debug_log("Logging Queue failed to empty within 60 seconds")
# Signal to the Threads to stop.
if self._debug:
self._debug_log("Signalling to Threads that the connection is closed")
self._connected.clear()
# Close the log file.
if not self._alea_logging:
if self._debug:
self._debug_log("Closing the log file")
self._log_file.close()
# Close the connection.
if self._debug:
self._debug_log("Close: closing API connection")
self.api.Close()
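# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the pupil-size logic in
# AleaTracker.sample() averages both eyes when both are tracked, and falls
# back to whichever single eye is available. A stand-alone version:
def _combine_pupil_sizes(l_size, r_size):
    if (l_size > 0) and (r_size > 0):
        return (l_size + r_size) / 2.0
    return l_size if l_size > 0 else (r_size if r_size > 0 else 0.0)

assert _combine_pupil_sizes(30.0, 34.0) == 32.0
assert _combine_pupil_sizes(30.0, 0.0) == 30.0
assert _combine_pupil_sizes(0.0, 0.0) == 0.0
# ---------------------------------------------------------------------------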
# Define the class that handles API communication.
class AleaAPI:
"""Python wrapper for the ET API from Alea Technologies"""
def __init__(self):
"""
desc:
Initialises an AleaAPI object, but really only checks whether the
DLL is properly loaded. Make sure to call Open to start the
connection.
"""
# Raise an exception if the DLL could not be loaded.
if etapi is None:
raise Exception("Could not load the CEtAPi DLL. Has it been installed? Has it been added to the path?")
def _error(self, code):
# Attempt to close the connection to the API.
try:
self.Close()
except:
print("WARNING: Failed to close the connection to the API!")
# Throw an Exception.
raise Exception("Alea EtAPI error: {}".format(EtApiError[code]))
def Open(self, appKey, targetIP=None, targetPort=None, listenIP=None, \
listenPort=None):
"""
desc:
Initialises the API, opens the socket connection, establishes a
link to the hardware. Make sure that no firewall is blocking the
port. This function will use the default values for IP addresses
and ports, unless all keyword arguments are specified.
arguments:
appKey:
desc:
Application-specific key to register the application, ask
alea technologies for your specific application key.
type: str
keywords:
targetIP:
desc:
IP address of the eye tracker. (Default = "127.0.0.1")
type: str
targetPort:
desc:
Target port of the server socket link. (Default = 27412)
type: int
listenIP:
desc:
IP address of the client application. (Default is
"127.0.0.1")
type: str
listenPort:
desc:
Listen port of the client socket link. (Default = 27413)
"""
# Check whether all keyword arguments are None.
if (targetIP is None) or (targetPort is None) or (listenIP is None) or (listenPort is None):
# Use the default values as set in the API.
r = etapi.Open(ctypes.c_char_p(appKey.encode("utf-8")))
else:
# Use the user-defined values.
r = etapi.Open(ctypes.c_char_p(appKey.encode("utf-8")), \
ctypes.c_char_p(targetIP.encode("utf-8")), ctypes.c_int32(targetPort), \
ctypes.c_char_p(listenIP.encode("utf-8")), ctypes.c_int32(listenPort))
# Check the result.
if not check_result(r):
self._error(r)
def IsOpen(self):
"""
desc:
Checks whether the API is open (but not whether the server is up
and running).
returns:
desc: True if the API is open, and False if not.
type: bool
"""
# Make a call to the API, and save the result in a variable.
is_open = ctypes.c_bool()
r = etapi.IsOpen(ctypes.byref(is_open))
# Check the result.
if check_result(r):
return is_open.value
else:
self._error(r)
def RecordData(self, start):
"""
desc:
Starts or stops data recording. For the time being (2021-01-29),
the location of the output data file is in the user’s documents
folder, e.g.
C:\\User\\alea_technologies_gmbh\\IntelliGazeServer\\data\\Exp1\\User1\\
arguments:
start:
desc:
True if the recording to the Alea data file should start,
and False if the recording should stop.
type: bool
"""
# Make a call to the API.
r = etapi.RecordData(ctypes.c_bool(start))
# Check the result.
if not check_result(r):
self._error(r)
def SendTrigger(self, message):
"""
desc:
Sends a trigger message. If data recording is in progress, the
message will be recorded as well. Usually such messages are used
to separate trials within an experiment, and to record events such
as stimulus onsets/offsets, responses, etc.
arguments:
message:
desc:
The message that should be recorded in the data file.
type: str
"""
# Record the message to the data file.
r = etapi.SendTrigger(ctypes.c_char_p(message.encode("utf-8")))
# Check the result.
if not check_result(r):
self._error(r)
def WaitForData(self, timeOutMilliseconds):
"""
desc:
Blocks until the next sample comes in. It is not recommended to
use this function for data streaming. Use Sample instead.
arguments:
timeOutMilliseconds:
desc: Timeout in milliseconds. This function will return
on obtaining a sample or on timing out.
type: int
returns:
desc: The latest AleaData when it becomes available. This is a
CAleaData struct, or None if a timeout occurred.
type: ctypes.Structure
"""
# Create a sample struct to write incoming data to.
sample = CAleaData()
dwMilliseconds = ctypes.c_int32(timeOutMilliseconds)
# Make a call to the API, and save the result in a variable.
r = etapi.WaitForData(ctypes.byref(sample), dwMilliseconds)
# Check if the result is a timeout.
if r == -1:
# Set the sample to None.
sample = None
# Catch any other errors.
else:
if not check_result(r):
self._error(r)
return sample
def Close(self):
"""
desc:
Closes the API, releases the socket connection, and frees API
resources. Call close before quitting the client application!
"""
# Make a call to the API, and save the result in a variable.
r = etapi.Close()
# In the C API wrapper, the Close function doesn't actually return
# anything. Instead, it raises a warning about a blocking operation
# being interrupted by a call to WSACancelBlockingCall. Thus, the
# result is likely to be 1. We'll ignore this locally.
if r == 1:
return
# Check the result.
if not check_result(r):
self._error(r)
def Version(self):
"""
desc:
Returns the major.minor.build version and the device type. The
device type is coded 0 for IG30 systems, and 1 for IG15 systems.
returns:
desc: The version and device in a single string, formatted
"major.minor.build.device"
type: str
"""
# Make a call to the API, and save the result in a variable.
major = ctypes.c_int32()
minor = ctypes.c_int32()
build = ctypes.c_int32()
device = ctypes.c_int32()
r = etapi.Version(ctypes.byref(major), ctypes.byref(minor), \
ctypes.byref(build), ctypes.byref(device))
# Convert to string.
version = "{}.{}.{}.{}".format( \
major.value, minor.value, build.value, device.value)
# Check the result.
if check_result(r):
return version
else:
self._error(r)
def PerformCalibration(self, noPoints=9, location=0, \
randomizePoints=True, slowMode=False, audioFeedback=True, eye=0, \
calibrationImprovement=False, skipBadPoints=False, \
autoCalibration=True, backColor=(127,127,127), pointColor=(0,0,0), \
imageName=""):
"""
desc:
Performs an eye-tracker-controlled calibration: the tracker will
autonomously run through the calibration process, including the
displaying of calibration points. The CalibrationDoneDelegate and
ResultCalibrationExCB callbacks will be called when the
calibration is finished or when an error occurs.
keywords:
noPoints:
desc:
Number of points used in the calibration. Choose from 1, 5,
9, or 16. (Default = 9)
type: int
location:
desc:
Indication of where the calibration points should be
presented. Choose from 0 (Full, outer points are 5% off
the monitor edge), 1 (Center, outer points are 20% off
the monitor edge), 2 (Bottom, points are in the lower half
of the monitor), 3 (Horizontal, points are located in a
horizontal line), and 4 (Vertical, points are located in
a vertical line). (Default = 0)
type: int
randomizePoints:
desc:
Set to True to allow the tracker to randomise the order in
which calibration points are shown. Some experienced users
have a tendency to anticipate where points will be shown,
and to produce a systematic calibration error by moving
their eyes to the next point too quickly. Shuffling the
points prevents this. (Default = True)
type: bool
slowMode:
desc:
Set to True to allow the tracker to show big and slow
calibration targets. (Default = False)
type: bool
audioFeedback:
desc:
Set to True to allow the tracker to play a sound when the
point jumps to a new position. (Default = True)
type: bool
eye:
desc:
Determines what eyes to calibrate and what eyes to track.
Choose from 0 (calibrate both eyes), 1 (calibrate the left
eye and track both eyes, "right glass eye"), 2 (calibrate
the right eye and track both eyes, "left glass eye"), 3
(calibrate and track only the left eye, "right pirate
eye"), or 4 (calibrate and track only the right eye, "left
pirate eye"). (Default = 0)
type: int
calibrationImprovement:
desc:
Set to True if outliers or skipped points from a previous
calibrations should be re-calibrated. Can only be done
when a previous calibration returned with an "Improvement"
suggestion! (Default = False)
type: bool
skipBadPoints:
desc:
When set to True, IntelliGaze will not get stuck at
uncalibratable points. It will skip them, and try to
complete the calibration without them. (Default = False)
type: bool
autoCalibration:
desc:
Set to True to allow the tracker to detect fixations and
accept points automatically. (Default = True)
type: bool
backColor:
desc:
RGB value of the background colour. This should have a
similar brightness to the experiment or application that
this calibration will be used with. The format is a tuple
with 3 ints in the range 0-255, representing red, green,
and blue. For example, (0,0,0) is black, and (255,255,255)
is white. (Default = (127,127,127) )
type: int
pointColor:
desc:
RGB value of the calibration point colour. This should
have a good contrast to the background. The format is a
tuple with 3 ints in the range 0-255, representing red,
green, and blue. For example, (255,0,0) is pure red.
(Default = (0,0,0) )
type: tuple
imageName:
desc:
Leave empty for the default circle, or provide a path to
an image file to use that image as calibration target. Use
the string "ANIMATION:PARROT" to calibrate with an
animation. (Default = "")
type: str
"""
# Convert the colours from RGB to 32-bit integer ARGB format.
alpha = 255 * 256 * 256 * 256
backColor = alpha + backColor[0] * 256 * 256 + backColor[1] * 256 \
+ backColor[2]
pointColor = alpha + pointColor[0] * 256 * 256 + pointColor[1] * 256 \
+ pointColor[2]
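# Worked example (editor's note): the default grey (127, 127, 127) packs to
# 0xFF000000 + 127*0x10000 + 127*0x100 + 127 = 0xFF7F7F7F = 4286545791.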
# Make a call to the API, and save the result in a variable.
r = etapi.PerformCalibration(ctypes.c_int32(noPoints), \
ctypes.c_int32(location), ctypes.c_bool(randomizePoints), \
ctypes.c_bool(slowMode), ctypes.c_bool(audioFeedback), \
ctypes.c_int32(eye), ctypes.c_bool(calibrationImprovement), \
ctypes.c_bool(skipBadPoints), ctypes.c_bool(autoCalibration), \
ctypes.c_int32(backColor), ctypes.c_int32(pointColor), \
ctypes.c_char_p(imageName.encode("utf-8")))
# Check the result.
if not check_result(r):
self._error(r)
def WaitForCalibrationResult(self):
"""
desc:
Waits until the calibration is done, or until an error occurs.
returns:
desc: Status and improve values for the current calibration,
Boolean values captured in a tuple (status, improve)
type: tuple
"""
# Set up variables to pass to the API function.
status = ctypes.c_int32()
improve = ctypes.c_bool()
# Set the wait time to -1, to signal there isn't a fixed timeout.
dwMilliseconds = ctypes.c_int(-1)
# Wait for it.
r = etapi.WaitForCalibrationResult(ctypes.byref(status), \
ctypes.byref(improve), dwMilliseconds)
# Check the result.
if not check_result(r):
self._error(r)
return (status.value, improve.value)
def DataStreaming(self, mode):
"""
desc:
Instructs the eye tracker to stream data, which will cause
callback functions to be called when new data becomes available.
When streaming is disabled, the application centre of IntelliGaze
is active, as is the mouse control. When data streaming is turned
on, IntelliGaze is operated in "background mode": The application
centre is invisible, and no IntelliGaze mouse control takes place.
arguments:
mode:
desc:
Determines the streaming mode. Choose from 0 (disable
streaming), 1 (stream raw data at the maximum tracker
speed), 2 (stream eye events data, i.e. fixations and
saccades), 4 (stream Blickfang activation data), or 8
(EyeGesture data).
type: int
"""
# Make a call to the API, and save the result in a variable.
r = etapi.DataStreaming(ctypes.c_int32(mode))
# Check the result.
if not check_result(r):
self._error(r)
def ShowStatusWindow(self, posX, posY, size, opacity):
"""
desc:
Displays the eye tracker status window at the given position. The
status window informs about the relative position of the head and
any eye tracking problems.
arguments:
posX:
desc:
Horizontal position on the screen (in pixels).
type: int
posY:
desc:
Vertical position on the screen (in pixels).
type: int
size:
desc:
Width of the status window (in pixels). Can range from 100
to 768.
type: int
opacity:
desc:
Opacity of the status window, expressed as a percentage.
type: int
"""
# Make a call to the API, and save the result in a variable.
r = etapi.ShowStatusWindow(ctypes.c_int32(posX), ctypes.c_int32(posY), \
ctypes.c_int32(size), ctypes.c_int32(opacity))
# Check the result.
if not check_result(r):
self._error(r)
def HideStatusWindow(self):
"""
desc:
Hides the status window. For info on how to show the status window,
see the ShowStatusWindow function.
"""
# Make a call to the API, and save the result in a variable.
r = etapi.HideStatusWindow()
# Check the result.
if not check_result(r):
self._error(r)
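# Usage sketch (hypothetical instance `tracker` and screen coordinates):
# show a 300-pixel wide status window near the top-left corner at 80%
# opacity, then hide it again once the participant is positioned:
#
#   tracker.ShowStatusWindow(50, 50, 300, 80)
#   # ... let the participant settle ...
#   tracker.HideStatusWindow()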
def ExitServer(self):
"""
desc:
Exits the eye tracker server application.
"""
# Make a call to the API, and save the result in a variable.
r = etapi.ExitServer()
# Check the result.
if not check_result(r):
self._error(r)
def QuitServer(self):
"""
desc:
Exits the eye tracker server application.
"""
# NOTE: Not entirely sure which is the correct function: ExitServer is
# used in the C API, but QuitServer is listed in the documentation.
# Make a call to the API, and save the result in a variable.
r = etapi.QuitServer()
# Check the result.
if not check_result(r):
self._error(r)
# # # # #
# UNSUPPORTED IN C API
# The following functions appear in the API, but are not supported in the C
# wrapper for the API. They are commented out for now, but retained in the
# code base in case support is added or required in future releases.
# def StartCalibration(self, noPoints=9, location=0, randomizePoints=True, \
# eye=0, calibrationImprovement=False, skipBadPoints=True,
# autoCalibration=True):
#
# """
# desc:
# Starts a client-controlled calibration. This means the client
# software is responsible for showing and moving the calibration
# points! The eye tracker will call the CalibrationDoneDelegate
# callback when it's finished or an error occurred.
#
# keywords:
# noPoints:
# desc:
# Number of points used in the calibration. Choose from 1, 5,
# 9, or 16. (Default = 9)
# type: int
# location:
# desc:
# Indication of where the calibration points should be
# presented. Choose from 0 (Full, outer points are 5% off
# the monitor edge), 1 (Center, outer points are 20% off
# the monitor edge), 2 (Bottom, points are in the lower half
# of the monitor), 3 (Horizontal, points are located in a
# horizontal line), and 4 (Vertical, points are located in
# a vertical line). (Default = 0)
# type: int
# randomizePoints:
# desc:
# Set to True to allow the tracker to randomise the order in
# which calibration points are shown. Some experienced users
# have a tendency to anticipate where points will be shown,
# and to produce a systematic calibration error by moving
# their eyes to the next point too quickly. Shuffling the
# points prevents this. (Default = True)
# type: bool
# eye:
# desc:
# Determines what eyes to calibrate and what eyes to track.
# Choose from 0 (calibrate both eyes), 1 (calibrate the left
# eye and track both eyes, "right glass eye"), 2 (calibrate
# the right eye and track both eyes, "left glass eye"), 3
# (calibrate and track only the left eye, "right pirate
# eye"), or 4 (calibrate and track only the right eye, "left
# pirate eye"). (Default = 0)
# type: int
# calibrationImprovement:
# desc:
# Set to True if outliers or skipped points from a previous
# calibrations should be re-calibrated. Can only be done
# when a previous calibration returned with an "Improvement"
# suggestion! (Default = False)
# type: bool
# skipBadPoints:
# desc:
# When set to True, IntelliGaze will not get stuck at
# uncalibratable points. It will skip them, and try to
# complete the calibration without them. (Default = True)
# type: bool
# autoCalibration:
# desc:
# Set to True to allow the tracker to detect fixations and
# accept points automatically. (Default = True)
# type: bool
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.StartCalibration(ctypes.c_int32(noPoints), \
# ctypes.c_int32(location), ctypes.c_bool(randomizePoints), \
# ctypes.c_int32(eye), ctypes.c_bool(calibrationImprovement), \
# ctypes.c_bool(skipBadPoints), ctypes.c_bool(autoCalibration))
# # Check the result.
# if not check_result(r):
# self._error(r)
# def StopCalibration(self):
#
# """
# desc:
# Interrupts the calibration procedure, and will cause the eye
# tracker to notify the client about the calibration result by
# calling the CalibrationDoneDelegate callback.
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.StopCalibration()
# # Check the result.
# if not check_result(r):
# self._error(r)
# def CalibrationStatus(self, isMoving, isHot, acceptPoint):
#
# """
# desc:
# Informs the eye tracker server about the current status of the
# calibration procedure. Note that this function allows client
# software to let the tracker know about the calibration,
# particularly whether the calibration target is moving, whether
# it's "hot" (OK to accept fixations for), and whether to the point
# should be force-accepted. This data is required by the eye tracker
# to know when to search for fixations during the calibration
# procedure.
#
# arguments:
# isMoving:
# desc:
# Set this to True while the fixation target is moving.
# type: bool
# isHot:
# desc:
# Set this to True to make the eye tracker accept the next
# fixation it detects.
# type: bool
# acceptPoint:
# desc:
# If set to True, the eye tracker will accept the next
# fixation it detects to accept the calibration point. Use
# this parameter when doing a manual (not self-paced)
# calibration, i.e. set this to True when the operator hits
# a key to confirm fixation. (Not available in
# autoCalibration mode.)
# type: bool
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.CalibrationStatus(ctypes.c_bool(isMoving), \
# ctypes.c_bool(isHot), ctypes.c_bool(acceptPoint))
# # Check the result.
# if not check_result(r):
# self._error(r)
# def LoadCalibration(self, profileName):
#
# """
# desc:
# Tries to load a calibration for the passed profile name.
#
# arguments:
# profileName:
# desc:
# Name of the profile to load.
# type: str
#
# returns:
# desc: True when the function succeeds, and False when it didn't.
# type: bool
#
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.LoadCalibration(ctypes.c_char_p(profileName.encode("utf-8")))
# # Check and return the result.
# return check_result(r)
# def SaveCalibration(self, profileName):
#
# """
# desc:
# Tries to save the current calibration profile under the passed
# profile name.
#
# arguments:
# profileName:
# desc:
# Name of the profile to save the current calibration for.
# type: str
#
# returns:
# desc: True when the function succeeds, and False when it didn't.
# type: bool
#
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.SaveCalibration(ctypes.c_char_p(profileName.encode("utf-8")))
# # Check and return the result.
# return check_result(r)
# def CalibrationSize(self):
#
# """
# desc:
# Returns the size of the calibration area. Can be used to remap the
# gaze data if the screen resolution is changed.
#
# returns:
# desc:
# A (width,height) tuple of integers describing the calibration
# area's size.
# type: tuple
# """
#
# # Create two variables to hold the width and height in.
# width = ctypes.c_int32()
# height = ctypes.c_int32()
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.CalibrationSize(ctypes.byref(width), ctypes.byref(height))
# # Check the result.
# if not check_result(r):
# self._error(r)
#
# return (width.value, height.value)
# def SetCorrectionPoint(self, targetX, targetY):
#
# """
# desc:
# Improves the calibration accuracy by feeding gaze activations back
# into the gaze mapping function. Call this function if a participant
# is looking at a target. This is effectively a drift correction.
#
# arguments:
# targetX:
# desc:
# The horizontal location of the target on the screen
# (measured in pixels).
# type: int
# targetY:
# desc:
# The vertical location of the target on the screen
# (measured in pixels).
# type: int
# """
#
# # Make a call to the API, and save the result in a variable.
# r = etapi.SetCorrectionPoint(ctypes.c_int32(targetX), \
# ctypes.c_int32(targetY))
# # Check the result.
# if not check_result(r):
# self._error(r)
|
esdalmaijer/PyGaze
|
pygaze/_eyetracker/alea/alea.py
|
Python
|
gpl-3.0
| 73,900
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Copyright (c) 2010-2012 Jorge J. García Flores, LIMSI/CNRS
## This file is part of Unoporuno.
## Unoporuno is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## Unoporuno is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Unoporuno. If not, see <http://www.gnu.org/licenses/>.
##
# usage: python unoporuno_export.py (name|number) results_path [person_id_file]
# exports a unoporuno database to an xml file following the unoporuno DTD
# if a person_id_file is present, it exports only the IDs listed in that file
import logging, ConfigParser, os, sys
logging.basicConfig(level=logging.INFO)
config = ConfigParser.ConfigParser()
config.read("unoporuno.conf")
if len(config.sections())==0:
config.read(os.environ['HOME']+"/.unoporuno/unoporuno.conf")
if len(config.sections())==0:
logging.error("No configuration file on unoporuno.conf")
exit(-1)
UNOPORUNO_ROOT = config.get('unoporuno', 'root')
UNOPORUNO_PATH = UNOPORUNO_ROOT + '/module/'
CIDESAL_WEBAPP_PATH = UNOPORUNO_ROOT +'/webapp/'
if not CIDESAL_WEBAPP_PATH in sys.path:
sys.path.append(CIDESAL_WEBAPP_PATH)
sys.path.append(CIDESAL_WEBAPP_PATH+'cidesal/')
from unoporuno.models import Busqueda, Persona, Snippet
if not UNOPORUNO_PATH in sys.path:
sys.path.append(UNOPORUNO_PATH)
from dospordos.tools import DiasporaOutput
try:
busqueda_in = sys.argv[1]
except:
logging.error('No parameter busqueda')
logging.error('Usage: python unoporuno_export.py NAME|NUMBER path')
exit(-1)
if busqueda_in.isdigit():
try:
busqueda = Busqueda.objects.get(id=int(busqueda_in))
except:
logging.error('No busqueda object with id=' +busqueda_in+ ' in UNOPORUNO database.')
exit(-1)
else:
try:
busqueda = Busqueda.objects.get(nombre=busqueda_in)
except:
logging.error('No busqueda object with nombre=' +busqueda_in+ ' in UNOPORUNO database.')
exit(-1)
logging.info('Processing busqueda ' +busqueda.nombre )
try:
results_path = sys.argv[2]
except:
logging.error('Missing parameter path')
logging.error('Usage: python unoporuno_export.py NAME|NUMBER path')
exit(-1)
try:
person_id_list = []
person_id_file = open(sys.argv[3])
for l in person_id_file:
person_id_list.append(l.strip())
except:
person_id_list = None
diaspora_output = DiasporaOutput(results_path)
if person_id_list:
logging.debug('launching export_persona with id_list=' +str(person_id_list))
diaspora_output.export_unoporuno_persona(busqueda, person_id_list)
else:
diaspora_output.export_unoporuno_busqueda(busqueda)
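# Example invocations (paths, names and IDs are hypothetical):
#   python unoporuno_export.py 42 /tmp/results
#   python unoporuno_export.py garcia /tmp/results person_ids.txt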
|
jorgegarciaflo/unoporuno
|
scripts/unoporuno_export.py
|
Python
|
gpl-3.0
| 3,167
|
# -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) Campbell Barton
# Contributors: Campbell Barton, Jiri Hnidek, Paolo Ciccone
"""
This script imports Wavefront OBJ files to Blender.
Usage:
Run this script from "File->Import" menu and then load the desired OBJ file.
Note: this loads mesh objects and materials only; NURBS and curves are not supported.
http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj
"""
import ast
import array
import os
import bpy
import mathutils
from bpy_extras.io_utils import unpack_list
from bpy_extras.image_utils import load_image
from bpy_extras.wm_utils.progress_report import (
ProgressReport,
ProgressReportSubstep,
)
def line_value(line_split):
"""
Returns a single string representing the value for this line.
None is returned if there is only one word.
"""
length = len(line_split)
if length == 1:
return None
elif length == 2:
return line_split[1]
elif length > 2:
return b' '.join(line_split[1:])
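# For example (illustrative values): line_value([b'newmtl', b'Shiny'])
# returns b'Shiny', while line_value([b'map_Kd', b'-s', b'2', b'tex.png'])
# joins the remainder and returns b'-s 2 tex.png'.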
def obj_image_load(imagepath, DIR, recursive, relpath):
"""
Mainly uses comprehensiveImageLoad, but first tries to replace '_' with ' ',
because Max's exporter replaces spaces with underscores.
"""
if "_" in imagepath:
image = load_image(imagepath.replace("_", " "), DIR, recursive=recursive, relpath=relpath)
if image:
return image
return load_image(imagepath, DIR, recursive=recursive, place_holder=True, relpath=relpath)
def create_materials(filepath, relpath,
material_libs, unique_materials, unique_material_images,
use_image_search, float_func):
"""
Create all the used materials in this obj,
assign colors and images to the materials from all referenced material libs
"""
DIR = os.path.dirname(filepath)
context_material_vars = set()
# Don't load the same image multiple times
context_imagepath_map = {}
def load_material_image(blender_material, context_material_name, img_data, type):
"""
Set textures defined in .mtl file.
"""
imagepath = os.fsdecode(img_data[-1])
map_options = {}
curr_token = []
for token in img_data[:-1]:
if token.startswith(b'-'):
if curr_token:
map_options[curr_token[0]] = curr_token[1:]
curr_token[:] = []
curr_token.append(token)
texture = bpy.data.textures.new(name=type, type='IMAGE')
# Absolute path - c:\.. etc would work here
image = context_imagepath_map.get(imagepath, ...)
if image == ...:
image = context_imagepath_map[imagepath] = \
obj_image_load(imagepath, DIR, use_image_search, relpath)
if image is not None:
texture.image = image
# Adds textures for materials (rendering)
if type == 'Kd':
mtex = blender_material.texture_slots.add()
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_color_diffuse = True
# adds textures to faces (Textured/Alt-Z mode)
# Only apply the diffuse texture to the face if the image has not been set with the inline usemat func.
unique_material_images[context_material_name] = image # set the texface image
elif type == 'Ka':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_ambient = True
elif type == 'Ks':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_color_spec = True
elif type == 'Ke':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_emit = True
elif type == 'Bump':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture.use_normal_map = True
mtex.texture_coords = 'UV'
mtex.use_map_normal = True
bump_mult = map_options.get(b'-bm')
if bump_mult:
mtex.normal_factor = bump_mult[0]
elif type == 'D':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_alpha = True
blender_material.use_transparency = True
blender_material.transparency_method = 'Z_TRANSPARENCY'
if "alpha" not in context_material_vars:
blender_material.alpha = 0.0
# TODO: unset diffuse material alpha if it has an alpha channel
elif type == 'disp':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use_map_displacement = True
elif type == 'refl':
mtex = blender_material.texture_slots.add()
mtex.use_map_color_diffuse = False
mtex.texture = texture
mtex.texture_coords = 'REFLECTION'
mtex.use_map_color_diffuse = True
map_type = map_options.get(b'-type')
if map_type and map_type != [b'sphere']:
print("WARNING, unsupported reflection type '%s', defaulting to 'sphere'"
"" % ' '.join(i.decode() for i in map_type))
mtex.mapping = 'SPHERE'
else:
raise Exception("invalid type %r" % type)
map_offset = map_options.get(b'-o')
map_scale = map_options.get(b'-s')
if map_offset:
mtex.offset.x = float(map_offset[0])
if len(map_offset) >= 2:
mtex.offset.y = float(map_offset[1])
if len(map_offset) >= 3:
mtex.offset.z = float(map_offset[2])
if map_scale:
mtex.scale.x = float(map_scale[0])
if len(map_scale) >= 2:
mtex.scale.y = float(map_scale[1])
if len(map_scale) >= 3:
mtex.scale.z = float(map_scale[2])
# Add an MTL with the same name as the obj if no MTLs are specified.
temp_mtl = os.path.splitext((os.path.basename(filepath)))[0] + ".mtl"
if os.path.exists(os.path.join(DIR, temp_mtl)):
material_libs.add(temp_mtl)
del temp_mtl
# Create new materials
for name in unique_materials: # .keys()
if name is not None:
unique_materials[name] = bpy.data.materials.new(name.decode('utf-8', "replace"))
unique_material_images[name] = None # assign None to all material images to start with, add to later.
# XXX Why was this needed? Cannot find any good reason, and adds stupid empty matslot in case we do not separate
# mesh (see T44947).
# ~ unique_materials[None] = None
# ~ unique_material_images[None] = None
for libname in sorted(material_libs):
# print(libname)
mtlpath = os.path.join(DIR, libname)
if not os.path.exists(mtlpath):
print("\tMaterial not found MTL: %r" % mtlpath)
else:
do_ambient = True
do_highlight = False
do_reflection = False
do_transparency = False
do_glass = False
do_fresnel = False
do_raytrace = False
emit_colors = [0.0, 0.0, 0.0]
# print('\t\tloading mtl: %e' % mtlpath)
context_material = None
mtl = open(mtlpath, 'rb')
for line in mtl: # .readlines():
line = line.strip()
if not line or line.startswith(b'#'):
continue
line_split = line.split()
line_id = line_split[0].lower()
if line_id == b'newmtl':
# Finalize previous mat, if any.
if context_material:
emit_value = sum(emit_colors) / 3.0
if emit_value > 1e-6:
# We have to adapt it to diffuse color too...
emit_value /= sum(context_material.diffuse_color) / 3.0
context_material.emit = emit_value
if not do_ambient:
context_material.ambient = 0.0
if do_highlight:
# FIXME, how else to use this?
context_material.specular_intensity = 1.0
if do_reflection:
context_material.raytrace_mirror.use = True
context_material.raytrace_mirror.reflect_factor = 1.0
if do_transparency:
context_material.use_transparency = True
context_material.transparency_method = 'RAYTRACE' if do_raytrace else 'Z_TRANSPARENCY'
if "alpha" not in context_material_vars:
context_material.alpha = 0.0
if do_glass:
if "ior" not in context_material_vars:
context_material.raytrace_transparency.ior = 1.5
if do_fresnel:
context_material.raytrace_mirror.fresnel = 1.0 # could be any value for 'ON'
"""
if do_raytrace:
context_material.use_raytrace = True
else:
context_material.use_raytrace = False
"""
# XXX, this is not following the OBJ spec, but this was
# written when raytracing wasn't the default, and it is annoying to disable for blender users.
context_material.use_raytrace = True
context_material_name = line_value(line_split)
context_material = unique_materials.get(context_material_name)
context_material_vars.clear()
emit_colors[:] = [0.0, 0.0, 0.0]
do_ambient = True
do_highlight = False
do_reflection = False
do_transparency = False
do_glass = False
do_fresnel = False
do_raytrace = False
elif context_material:
# we need to make a material to assign properties to it.
if line_id == b'ka':
context_material.mirror_color = (
float_func(line_split[1]), float_func(line_split[2]), float_func(line_split[3]))
# This is highly approximated, but let's try to stick as close to the exporter as possible... :/
context_material.ambient = sum(context_material.mirror_color) / 3
elif line_id == b'kd':
context_material.diffuse_color = (
float_func(line_split[1]), float_func(line_split[2]), float_func(line_split[3]))
context_material.diffuse_intensity = 1.0
elif line_id == b'ks':
context_material.specular_color = (
float_func(line_split[1]), float_func(line_split[2]), float_func(line_split[3]))
context_material.specular_intensity = 1.0
elif line_id == b'ke':
# We cannot set context_material.emit right now, we need final diffuse color as well for this.
emit_colors[:] = [
float_func(line_split[1]), float_func(line_split[2]), float_func(line_split[3])]
elif line_id == b'ns':
context_material.specular_hardness = int((float_func(line_split[1]) * 0.51) + 1)
elif line_id == b'ni': # Refraction index (between 1 and 3).
context_material.raytrace_transparency.ior = max(1, min(float_func(line_split[1]), 3))
context_material_vars.add("ior")
elif line_id == b'd': # dissolve (transparency)
context_material.alpha = float_func(line_split[1])
context_material.use_transparency = True
context_material.transparency_method = 'Z_TRANSPARENCY'
context_material_vars.add("alpha")
elif line_id == b'tr': # translucency
context_material.translucency = float_func(line_split[1])
elif line_id == b'tf':
# rgb, filter color, blender has no support for this.
pass
elif line_id == b'illum':
illum = int(line_split[1])
# inline comments are from the spec, v4.2
if illum == 0:
# Color on and Ambient off
do_ambient = False
elif illum == 1:
# Color on and Ambient on
pass
elif illum == 2:
# Highlight on
do_highlight = True
elif illum == 3:
# Reflection on and Ray trace on
do_reflection = True
do_raytrace = True
elif illum == 4:
# Transparency: Glass on
# Reflection: Ray trace on
do_transparency = True
do_reflection = True
do_glass = True
do_raytrace = True
elif illum == 5:
# Reflection: Fresnel on and Ray trace on
do_reflection = True
do_fresnel = True
do_raytrace = True
elif illum == 6:
# Transparency: Refraction on
# Reflection: Fresnel off and Ray trace on
do_transparency = True
do_reflection = True
do_raytrace = True
elif illum == 7:
# Transparency: Refraction on
# Reflection: Fresnel on and Ray trace on
do_transparency = True
do_reflection = True
do_fresnel = True
do_raytrace = True
elif illum == 8:
# Reflection on and Ray trace off
do_reflection = True
elif illum == 9:
# Transparency: Glass on
# Reflection: Ray trace off
do_transparency = True
do_reflection = True
do_glass = True
elif illum == 10:
# Casts shadows onto invisible surfaces
# blender can't do this
pass
elif line_id == b'map_ka':
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'Ka')
elif line_id == b'map_ks':
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'Ks')
elif line_id == b'map_kd':
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'Kd')
elif line_id == b'map_ke':
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'Ke')
elif line_id in {b'map_kn', b'map_bump', b'bump'}: # 'bump' is incorrect but some files use it.
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'Bump')
elif line_id in {b'map_d', b'map_tr'}: # Alpha map - Dissolve
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'D')
elif line_id in {b'map_disp', b'disp'}: # displacementmap
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'disp')
elif line_id in {b'map_refl', b'refl'}: # reflectionmap
img_data = line.split()[1:]
if img_data:
load_material_image(context_material, context_material_name, img_data, 'refl')
else:
print("\t%r:%r (ignored)" % (filepath, line))
mtl.close()
def hideBone(bone):
bone.layers[1] = True
bone.layers[0] = False
def showBone(bone):
bone.layers[0] = True
bone.layers[1] = False
def visibleBone(bone):
return bone.layers[0]
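# These three helpers adopt a convention specific to this importer:
# armature layer 0 holds visible bones and layer 1 holds hidden ones, so
# visibleBone() simply tests membership of layer 0.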
def setMinimumLenght(bone):
default_length = 0.005
if bone.length == 0:
bone.tail = bone.head - mathutils.Vector((0, .01, 0))
if bone.length < default_length:
bone.length = default_length
def create_armatures(filepath, relpath,
armature_libs, unique_materials, unique_material_images,
use_image_search, float_func, new_armatures, new_objects, bone_names):
"""
Create the armatures referenced by this obj file.
"""
DIR = os.path.dirname(filepath)
# Add an ARL with the same name as the obj if no ARLs are specified.
temp_arl = os.path.splitext((os.path.basename(filepath)))[0] + ".arl"
if os.path.exists(os.path.join(DIR, temp_arl)):
armature_libs.add(temp_arl)
del temp_arl
for libname in sorted(armature_libs):
# print(libname)
arlpath = os.path.join(DIR, libname)
if not os.path.exists(arlpath):
print("\tArmature not found ARL: %r" % arlpath)
else:
# context_multi_line = b''
# line_start = b''
line_split = []
vec = []
# bone_names = []
bone_parents = []
bone_heads = []
# print('\t\tloading armature: %e' % arlpath)
with open(arlpath, 'rb') as mtl:
bone_count = None
read_b_name = read_b_head = read_b_parent = False
for line in mtl: # .readlines():
line = line.strip()
if not line or line.startswith(b'#'):
continue
line_split = line.split()
if not bone_count:
bone_count = int(line_split[0])
read_b_name = read_b_parent = read_b_head = False
read_b_name = True
elif read_b_name:
bone_names.append(line)
read_b_name = read_b_parent = read_b_head = False
read_b_parent = True
elif read_b_parent:
bone_parents.append(int(line_split[0]))
read_b_name = read_b_parent = read_b_head = False
read_b_head = True
elif read_b_head:
bone_heads.append([float_func(line_split[0]), float_func(line_split[1]), float_func(line_split[2])])
read_b_name = read_b_parent = read_b_head = False
read_b_name = True
# Create the armature object
me = bpy.data.armatures.new('Armature')
me.draw_type = 'STICK'
ob = bpy.data.objects.new(me.name, me)
ob.show_x_ray = True
bpy.context.scene.collection.objects.link(ob)
bpy.context.view_layer.objects.active = ob
bpy.ops.object.mode_set(mode='EDIT')
# Create all bones
for bone_id, bone_name in enumerate(bone_names):
bone = me.edit_bones.new(bone_name.decode('utf-8', 'replace'))
bone.head = bone_heads[bone_id]
bone.tail = bone.head # + mathutils.Vector((0,.01,0))
# Set bone hierarchy
for bone_id, bone_parent_id in enumerate(bone_parents):
if bone_parent_id >= 0:
me.edit_bones[bone_id].parent = me.edit_bones[bone_parent_id]
# Calculate bone tails
for edit_bone in me.edit_bones:
if visibleBone(edit_bone):
childBones = [childBone for childBone in edit_bone.children
if visibleBone(childBone)]
else:
childBones = [childBone for childBone in edit_bone.children]
if childBones:
# Set tail to children middle
edit_bone.tail = mathutils.Vector(map(sum, zip(*(childBone.head.xyz for childBone in childBones))))/len(childBones)
else:
if edit_bone.parent:
vec = edit_bone.parent.tail - edit_bone.head
if (vec.length < .001):
edit_bone.tail = edit_bone.parent.vector + edit_bone.head
edit_bone.length = edit_bone.parent.length
else:
edit_bone.tail = (edit_bone.head - edit_bone.parent.tail) + edit_bone.head
edit_bone.length = 0.1
for edit_bone in me.edit_bones:
setMinimumLenght(edit_bone)
# Leave edit mode now that the bones have been created
bpy.ops.object.mode_set(mode='OBJECT')
new_armatures.append(ob)
def getVert(new_objects):
return [vert for obj in new_objects for vert in obj.data.vertices]
def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, verts_bw):
"""
Takes vert_loc and faces, and separates into multiple sets of
(verts_loc, faces, unique_materials, dataname)
"""
filename = os.path.splitext((os.path.basename(filepath)))[0]
if not SPLIT_OB_OR_GROUP or not faces:
use_verts_nor = any((False if f[1] is ... else True) for f in faces)
use_verts_tex = any((False if f[2] is ... else True) for f in faces)
use_verts_col = any((False if f[3] is ... else True) for f in faces)
# use the filename for the object name since we aren't chopping up the mesh.
return [(verts_loc, faces, unique_materials, filename, use_verts_nor, use_verts_tex, use_verts_col, verts_bw)]
def key_to_name(key):
# if the key is a tuple, join it to make a string
if not key:
return filename # assume it's a string; make sure this stays true if the splitting code is changed
else:
return key.decode('utf-8', 'replace')
# Return a key that makes the faces unique.
face_split_dict = {}
oldkey = -1 # initialize to a value that will never match the key
for face in faces:
key = face[6]
if oldkey != key:
# Check the key has changed.
(verts_split, faces_split, unique_materials_split, vert_remap,
use_verts_nor, use_verts_tex, use_verts_col, verts_bw_split) = face_split_dict.setdefault(key, ([], [], {}, {}, [], [], [], []))
oldkey = key
face_vert_loc_indices = face[0]
if not use_verts_nor and face[1] is not ...:
use_verts_nor.append(True)
if not use_verts_tex and face[2] is not ...:
use_verts_tex.append(True)
if not use_verts_col and face[3] is not ...:
use_verts_col.append(True)
# Remap verts to new vert list and add where needed
for enum, i in enumerate(face_vert_loc_indices):
map_index = vert_remap.get(i)
if map_index is None:
map_index = len(verts_split)
vert_remap[i] = map_index # set the new remapped index so we only add once and can reference next time.
verts_split.append(verts_loc[i]) # add the vert to the local verts
if verts_bw:
verts_bw_split.append(verts_bw[i]) # add the vertex weight
face_vert_loc_indices[enum] = map_index # remap to the local index
matname = face[4]
if matname and matname not in unique_materials_split:
unique_materials_split[matname] = unique_materials[matname]
faces_split.append(face)
# drop the vert_remap item and reorder into the final per-object tuples
return [(verts_split, faces_split, unique_materials_split, key_to_name(key), bool(use_vnor), bool(use_vtex), bool(use_vcol), verts_bw_split)
for key, (verts_split, faces_split, unique_materials_split, _, use_vnor, use_vtex, use_vcol, verts_bw_split)
in face_split_dict.items()]
def create_mesh(new_objects,
use_edges,
verts_loc,
verts_nor,
verts_tex,
verts_col,
faces,
unique_materials,
unique_material_images,
unique_smooth_groups,
vertex_groups,
dataname,
verts_bw,
new_armatures,
bone_names
):
"""
Takes all the data gathered and generates a mesh, adding the new object to new_objects.
Deals with ngons, sharp edges and assigning materials.
"""
if unique_smooth_groups:
sharp_edges = set()
smooth_group_users = {context_smooth_group: {} for context_smooth_group in unique_smooth_groups.keys()}
context_smooth_group_old = -1
fgon_edges = set() # Used for storing fgon keys when we need to tessellate/untessellate them (ngons with holes).
edges = []
tot_loops = 0
context_object = None
# reverse loop through face indices
for f_idx in range(len(faces) - 1, -1, -1):
(face_vert_loc_indices,
face_vert_nor_indices,
face_vert_tex_indices,
face_vert_col_indices,
context_material,
context_smooth_group,
context_object,
face_invalid_blenpoly,
) = faces[f_idx]
len_face_vert_loc_indices = len(face_vert_loc_indices)
if len_face_vert_loc_indices == 1:
faces.pop(f_idx) # can't add single vert faces
# Face with a single item in face_vert_nor_indices is actually a polyline!
elif len(face_vert_nor_indices) == 1 or len_face_vert_loc_indices == 2:
if use_edges:
edges.extend((face_vert_loc_indices[i], face_vert_loc_indices[i + 1])
for i in range(len_face_vert_loc_indices - 1))
faces.pop(f_idx)
else:
# Smooth Group
if unique_smooth_groups and context_smooth_group:
# Is part of a smooth group and is a face
if context_smooth_group_old is not context_smooth_group:
edge_dict = smooth_group_users[context_smooth_group]
context_smooth_group_old = context_smooth_group
prev_vidx = face_vert_loc_indices[-1]
for vidx in face_vert_loc_indices:
edge_key = (prev_vidx, vidx) if (prev_vidx < vidx) else (vidx, prev_vidx)
prev_vidx = vidx
edge_dict[edge_key] = edge_dict.get(edge_key, 0) + 1
# NGons into triangles
if face_invalid_blenpoly:
# ignore triangles with invalid indices
if len(face_vert_loc_indices) > 3:
from bpy_extras.mesh_utils import ngon_tessellate
ngon_face_indices = ngon_tessellate(verts_loc, face_vert_loc_indices)
faces.extend([(
[face_vert_loc_indices[ngon[0]],
face_vert_loc_indices[ngon[1]],
face_vert_loc_indices[ngon[2]],
],
[face_vert_nor_indices[ngon[0]],
face_vert_nor_indices[ngon[1]],
face_vert_nor_indices[ngon[2]],
] if face_vert_nor_indices else [],
[face_vert_tex_indices[ngon[0]],
face_vert_tex_indices[ngon[1]],
face_vert_tex_indices[ngon[2]],
] if face_vert_tex_indices else [],
[face_vert_col_indices[ngon[0]],
face_vert_col_indices[ngon[1]],
face_vert_col_indices[ngon[2]],
] if face_vert_col_indices else [],
context_material,
context_smooth_group,
context_object,
[],
)
for ngon in ngon_face_indices]
)
tot_loops += 3 * len(ngon_face_indices)
# edges to make ngons
if len(ngon_face_indices) > 1:
edge_users = set()
for ngon in ngon_face_indices:
prev_vidx = face_vert_loc_indices[ngon[-1]]
for ngidx in ngon:
vidx = face_vert_loc_indices[ngidx]
if vidx == prev_vidx:
continue # broken OBJ... Just skip.
edge_key = (prev_vidx, vidx) if (prev_vidx < vidx) else (vidx, prev_vidx)
prev_vidx = vidx
if edge_key in edge_users:
fgon_edges.add(edge_key)
else:
edge_users.add(edge_key)
faces.pop(f_idx)
else:
tot_loops += len_face_vert_loc_indices
# Build sharp edges
if unique_smooth_groups:
for edge_dict in smooth_group_users.values():
for key, users in edge_dict.items():
if users == 1: # This edge is on the boundary of a group
sharp_edges.add(key)
# map the material names to an index
material_mapping = {name: i for i, name in enumerate(unique_materials)} # enumerate over unique_materials keys()
materials = [None] * len(unique_materials)
for name, index in material_mapping.items():
materials[index] = unique_materials[name]
me = bpy.data.meshes.new(dataname)
# make sure the list isn't too big
for material in materials:
me.materials.append(material)
me.vertices.add(len(verts_loc))
me.loops.add(tot_loops)
me.polygons.add(len(faces))
# verts_loc is a list of (x, y, z) tuples
me.vertices.foreach_set("co", unpack_list(verts_loc))
loops_vert_idx = []
faces_loop_start = []
faces_loop_total = []
lidx = 0
for f in faces:
vidx = f[0]
nbr_vidx = len(vidx)
loops_vert_idx.extend(vidx)
faces_loop_start.append(lidx)
faces_loop_total.append(nbr_vidx)
lidx += nbr_vidx
me.loops.foreach_set("vertex_index", loops_vert_idx)
me.polygons.foreach_set("loop_start", faces_loop_start)
me.polygons.foreach_set("loop_total", faces_loop_total)
if verts_nor and me.loops:
# Note: we store 'temp' normals in loops, since validate() may alter final mesh,
# we can only set custom lnors *after* calling it.
me.create_normals_split()
if verts_tex and me.polygons:
me.uv_layers.new()
if verts_col and me.polygons:
me.vertex_colors.new()
context_material_old = -1 # avoid a dict lookup
mat = 0 # rare case it may be un-initialized.
for i, (face, blen_poly) in enumerate(zip(faces, me.polygons)):
if len(face[0]) < 3:
raise Exception("bad face") # Shall not happen, we got rid of those earlier!
(face_vert_loc_indices,
face_vert_nor_indices,
face_vert_tex_indices,
face_vert_col_indices,
context_material,
context_smooth_group,
context_object,
face_invalid_blenpoly,
) = face
if context_smooth_group:
blen_poly.use_smooth = True
if context_material:
if context_material_old is not context_material:
mat = material_mapping[context_material]
context_material_old = context_material
blen_poly.material_index = mat
if verts_nor and face_vert_nor_indices:
for face_noidx, lidx in zip(face_vert_nor_indices, blen_poly.loop_indices):
me.loops[lidx].normal[:] = verts_nor[0 if (face_noidx is ...) else face_noidx]
if verts_col and face_vert_col_indices:
for face_colidx, lidx in zip(face_vert_col_indices, blen_poly.loop_indices):
me.vertex_colors[0].data[lidx].color[:] = verts_col[0 if (face_colidx is ...) else face_colidx][:3]
if verts_tex and face_vert_tex_indices:
if context_material:
image = unique_material_images[context_material]
if image: # Can be None if the material doesn't have an image.
me.uv_textures[0].data[i].image = image
blen_uvs = me.uv_layers[0]
for face_uvidx, lidx in zip(face_vert_tex_indices, blen_poly.loop_indices):
blen_uvs.data[lidx].uv = verts_tex[0 if (face_uvidx is ...) else face_uvidx]
use_edges = use_edges and bool(edges)
if use_edges:
me.edges.add(len(edges))
# edges should be a list of (a, b) tuples
me.edges.foreach_set("vertices", unpack_list(edges))
me.validate(clean_customdata=False) # *Very* important to not remove lnors here!
me.update(calc_edges=use_edges)
# Un-tessellate as much as possible, in case we had to triangulate some ngons...
if fgon_edges:
import bmesh
bm = bmesh.new()
bm.from_mesh(me)
verts = bm.verts[:]
get = bm.edges.get
edges = [get((verts[vidx1], verts[vidx2])) for vidx1, vidx2 in fgon_edges]
try:
bmesh.ops.dissolve_edges(bm, edges=edges, use_verts=False)
except:
# Possible dissolve fails for some edges, but don't fail silently in case this is a real bug.
import traceback
traceback.print_exc()
bm.to_mesh(me)
bm.free()
# XXX If validate changes the geometry, this is likely to be broken...
if unique_smooth_groups and sharp_edges:
for e in me.edges:
if e.key in sharp_edges:
e.use_edge_sharp = True
me.show_edge_sharp = True
if verts_nor:
clnors = array.array('f', [0.0] * (len(me.loops) * 3))
me.loops.foreach_get("normal", clnors)
if not unique_smooth_groups:
me.polygons.foreach_set("use_smooth", [True] * len(me.polygons))
me.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
me.use_auto_smooth = True
me.show_edge_sharp = True
ob = bpy.data.objects.new(me.name, me)
armature_ob = None
if new_armatures:
armature_ob = new_armatures[0]
if armature_ob:
# Assign vertex weights
mod = ob.modifiers.new(type="ARMATURE", name="Armature")
mod.use_vertex_groups = True
mod.object = armature_ob
parent_armature = True
if parent_armature:
ob.parent = armature_ob
for vert_id, bws in enumerate(verts_bw):
for bw in bws:
bone_idx, bone_weight = bw
# print('----')
# print('bone_idx', bone_idx)
# print('bone_names', bone_names)
bone_name = bone_names[bone_idx].decode('utf-8', "replace")
if bone_weight == 0.0 or bone_name == 'root groud':
continue
if bone_name:
vert_group = ob.vertex_groups.get(bone_name)
if not vert_group:
vert_group = ob.vertex_groups.new(bone_name)
vert_group.add([vert_id], bone_weight, 'REPLACE')
new_objects.append(ob)
# Create the vertex groups. No need to have the flag passed here since we test for the
# content of the vertex_groups. If the user selects to NOT have vertex groups saved then
# the following test will never run
for group_name, group_indices in vertex_groups.items():
group = ob.vertex_groups.new(group_name.decode('utf-8', "replace"))
group.add(group_indices, 1.0, 'REPLACE')
def create_nurbs(context_nurbs, vert_loc, new_objects):
"""
Add nurbs object to blender, only support one type at the moment
"""
deg = context_nurbs.get(b'deg', (3,))
curv_range = context_nurbs.get(b'curv_range')
curv_idx = context_nurbs.get(b'curv_idx', [])
parm_u = context_nurbs.get(b'parm_u', [])
parm_v = context_nurbs.get(b'parm_v', [])
name = context_nurbs.get(b'name', b'ObjNurb')
cstype = context_nurbs.get(b'cstype')
if cstype is None:
print('\tWarning, cstype not found')
return
if cstype != b'bspline':
print('\tWarning, cstype is not supported (only bspline)')
return
if not curv_idx:
print('\tWarning, curv argument empty or not set')
return
if len(deg) > 1 or parm_v:
print('\tWarning, surfaces not supported')
return
cu = bpy.data.curves.new(name.decode('utf-8', "replace"), 'CURVE')
cu.dimensions = '3D'
nu = cu.splines.new('NURBS')
nu.points.add(len(curv_idx) - 1) # a point is added to start with
nu.points.foreach_set("co", [co_axis for vt_idx in curv_idx for co_axis in (vert_loc[vt_idx] + (1.0,))])
nu.order_u = deg[0] + 1
# get the endpoint flag from the weighting
if curv_range and len(parm_u) > deg[0] + 1:
do_endpoints = True
for i in range(deg[0] + 1):
if abs(parm_u[i] - curv_range[0]) > 0.0001:
do_endpoints = False
break
if abs(parm_u[-(i + 1)] - curv_range[1]) > 0.0001:
do_endpoints = False
break
else:
do_endpoints = False
if do_endpoints:
nu.use_endpoint_u = True
# close
'''
do_closed = False
if len(parm_u) > deg[0]+1:
for i in xrange(deg[0]+1):
# print curv_idx[i], curv_idx[-(i+1)]
if curv_idx[i]==curv_idx[-(i+1)]:
do_closed = True
break
if do_closed:
nu.use_cyclic_u = True
'''
ob = bpy.data.objects.new(name.decode('utf-8', "replace"), cu)
new_objects.append(ob)
def strip_slash(line_split):
if line_split[-1][-1] == 92: # '\' char
if len(line_split[-1]) == 1:
line_split.pop() # remove the \ item
else:
line_split[-1] = line_split[-1][:-1] # remove the \ from the end last number
return True
return False
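# For example (illustrative input): for a continued OBJ line such as
# b'v 1.0 2.0 \', strip_slash([b'v', b'1.0', b'2.0', b'\\']) returns True
# and leaves [b'v', b'1.0', b'2.0'], signalling that the next physical
# line continues this statement.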
def get_float_func(filepath):
"""
find the float function for this obj file
- whether to replace commas or not
"""
file = open(filepath, 'rb')
for line in file: # .readlines():
line = line.lstrip()
if line.startswith(b'v'): # vn vt v
if b',' in line:
file.close()
return lambda f: float(f.replace(b',', b'.'))
elif b'.' in line:
file.close()
return float
file.close()
# in case all vert values were ints
return float
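# For example, a file whose vertex lines use European decimal commas
# (e.g. b'v 1,5 2,0 0,25') gets a float function that first replaces
# b',' with b'.', while ordinary files simply get the built-in float.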
def load(context,
filepath,
*,
global_clamp_size=0.0,
use_smooth_groups=True,
use_edges=True,
use_split_objects=True,
use_split_groups=True,
use_image_search=True,
use_groups_as_vgroups=False,
relpath=None,
global_matrix=None
):
"""
Called by the user interface or another script.
load_obj(path) - should give acceptable results.
This function parses the file and sends the data off
to be split into objects and then converted into mesh objects
"""
def handle_vec(line_start, context_multi_line, line_split, tag, data, vec, vec_len):
ret_context_multi_line = tag if strip_slash(line_split) else b''
if line_start == tag:
vec[:] = [float_func(v) for v in line_split[1:]]
elif context_multi_line == tag:
vec += [float_func(v) for v in line_split]
if not ret_context_multi_line:
data.append(tuple(vec[:vec_len]))
return ret_context_multi_line
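# For example (illustrative input): a continued vertex such as
#   v 1.0 2.0 \
#   3.0
# first stores [1.0, 2.0] in vec and returns b'v'; on the next line the
# continuation branch extends vec to [1.0, 2.0, 3.0] and, since that line
# no longer ends with a backslash, (1.0, 2.0, 3.0) is appended to data.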
def handle_bw_vec(line_start, context_multi_line, line_split, line, tag, data, vec, vec_len):
str_line = [line]
ret_context_multi_line = tag if strip_slash(str_line) else b''
if line_start == tag:
vec[:] = str_line
elif context_multi_line == tag:
vec[:] = [vec[0] + str_line[0]]
if not ret_context_multi_line:
str_vec = b''.join(vec)
str_str = str_vec.decode("utf-8", "ignore")
str_data = str_str.split(' ', 1)[1]
data.append(ast.literal_eval(str_data)[:vec_len])
return ret_context_multi_line
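# Inferred from the parsing above (the 'bw' tag is an extension of this
# importer, not standard OBJ): each bw line carries a Python literal after
# the tag, e.g. b'bw [(0, 0.75), (2, 0.25)]' would append
# [(0, 0.75), (2, 0.25)], up to vec_len entries of (bone index, weight).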
def create_face(context_material, context_smooth_group, context_object):
face_vert_loc_indices = []
face_vert_nor_indices = []
face_vert_tex_indices = []
face_vert_col_indices = []
return (
face_vert_loc_indices, # face item 0
face_vert_nor_indices, # face item 1
face_vert_tex_indices, # face item 2
face_vert_col_indices, # face item 3
context_material, # face item 4
context_smooth_group, # face item 5
context_object, # face item 6
[], # If non-empty, that face is a Blender-invalid ngon (holes...), need a mutable object for that...
)
with ProgressReport(context.window_manager) as progress:
progress.enter_substeps(1, "Importing OBJ %r..." % filepath)
if global_matrix is None:
global_matrix = mathutils.Matrix()
if use_split_objects or use_split_groups:
use_groups_as_vgroups = False
verts_loc = []
verts_nor = []
verts_tex = []
verts_col = []
verts_bw = []
faces = [] # tuples of the faces
material_libs = set() # filenames to material libs this OBJ uses
armature_libs = set() # filenames to armature libs this OBJ uses
vertex_groups = {} # when use_groups_as_vgroups is true
# Get the string-to-float conversion func for this file; it is 'float' for almost all files.
float_func = get_float_func(filepath)
# Context variables
context_material = None
context_smooth_group = None
context_object = None
context_vgroup = None
# Nurbs
context_nurbs = {}
nurbs = []
context_parm = b'' # used by nurbs too but could be used elsewhere
# Until we can use sets
unique_materials = {}
unique_material_images = {}
unique_smooth_groups = {}
# unique_obects= {} - no use for this variable since the objects are stored in the face.
# When faces end with \ they are multiline;
# since we read the file line by line we can't skip ahead,
# so we need to track whether we are inside a multiline statement.
context_multi_line = b''
# Per-face handling data.
face_vert_loc_indices = None
face_vert_nor_indices = None
face_vert_tex_indices = None
face_vert_col_indices = None
face_vert_nor_valid = face_vert_tex_valid = face_vert_col_valid = False
face_items_usage = set()
face_invalid_blenpoly = None
prev_vidx = None
face = None
vec = []
progress.enter_substeps(3, "Parsing OBJ file...")
with open(filepath, 'rb') as f:
for line in f: # .readlines():
line_split = line.split()
if not line_split:
continue
line_start = line_split[0] # we compare with this a _lot_
if line_start == b'v' or context_multi_line == b'v':
context_multi_line = handle_vec(line_start, context_multi_line, line_split, b'v', verts_loc, vec, 3)
elif line_start == b'vn' or context_multi_line == b'vn':
context_multi_line = handle_vec(line_start, context_multi_line, line_split, b'vn', verts_nor, vec, 3)
elif line_start == b'vt' or context_multi_line == b'vt':
context_multi_line = handle_vec(line_start, context_multi_line, line_split, b'vt', verts_tex, vec, 2)
elif line_start == b'vc' or context_multi_line == b'vc':
context_multi_line = handle_vec(line_start, context_multi_line, line_split, b'vc', verts_col, vec, 4)
elif line_start == b'bw' or context_multi_line == b'bw':
context_multi_line = handle_bw_vec(line_start, context_multi_line, line_split, line, b'bw', verts_bw, vec, 4)
# Handle face lines (as faces) and the second and later lines of a multiline face here.
# Use 'f' not 'f ' because some objs (very rare) have 'fo ' for faces.
elif line_start == b'f' or context_multi_line == b'f':
if not context_multi_line:
line_split = line_split[1:]
# Instantiate a face
face = create_face(context_material, context_smooth_group, context_object)
(face_vert_loc_indices, face_vert_nor_indices, face_vert_tex_indices, face_vert_col_indices,
_1, _2, _3, face_invalid_blenpoly) = face
faces.append(face)
face_items_usage.clear()
# Else, reuse the face_vert_*_indices lists defined above for the face currently being parsed.
context_multi_line = b'f' if strip_slash(line_split) else b''
for v in line_split:
obj_vert = v.split(b'/')
# obj_vert[0] coordinate index
# obj_vert[1] texture mapping index
# obj_vert[2] normal index
# obj_vert[3] color index
idx = int(obj_vert[0]) - 1
vert_loc_index = (idx + len(verts_loc) + 1) if (idx < 0) else idx
# Add the vertex to the current group
# *warning*, this won't work for files that have groups defined around verts
if use_groups_as_vgroups and context_vgroup:
vertex_groups[context_vgroup].append(vert_loc_index)
# This is a first round to quickly detect ngons that *may* use the same edge more than once.
# Potential candidates will be re-checked once we have finished parsing the whole face.
if not face_invalid_blenpoly:
# If the same vertex is used more than once, an invalid ngon is suspected.
if vert_loc_index in face_items_usage:
face_invalid_blenpoly.append(True)
else:
face_items_usage.add(vert_loc_index)
face_vert_loc_indices.append(vert_loc_index)
# formatting for faces with normals and textures and vert color is
# loc_index/tex_index/nor_index/vcol_index
if len(obj_vert) > 1 and obj_vert[1] and obj_vert[1] != b'0':
idx = int(obj_vert[1]) - 1
face_vert_tex_indices.append((idx + len(verts_tex) + 1) if (idx < 0) else idx)
face_vert_tex_valid = True
else:
face_vert_tex_indices.append(...)
if len(obj_vert) > 2 and obj_vert[2] and obj_vert[2] != b'0':
idx = int(obj_vert[2]) - 1
face_vert_nor_indices.append((idx + len(verts_nor) + 1) if (idx < 0) else idx)
face_vert_nor_valid = True
else:
face_vert_nor_indices.append(...)
if len(obj_vert) > 3 and obj_vert[3] and obj_vert[3] != b'0':
idx = int(obj_vert[3]) - 1
face_vert_col_indices.append((idx + len(verts_col) + 1) if (idx < 0) else idx)
face_vert_col_valid = True
else:
face_vert_col_indices.append(...)
if not context_multi_line:
# Clear nor/tex indices in case we had none defined for this face.
if not face_vert_nor_valid:
face_vert_nor_indices.clear()
if not face_vert_tex_valid:
face_vert_tex_indices.clear()
if not face_vert_col_valid:
face_vert_col_indices.clear()
face_vert_nor_valid = face_vert_tex_valid = face_vert_col_valid = False
# Means we have finished a face, we have to do final check if ngon is suspected to be blender-invalid...
if face_invalid_blenpoly:
face_invalid_blenpoly.clear()
face_items_usage.clear()
prev_vidx = face_vert_loc_indices[-1]
for vidx in face_vert_loc_indices:
edge_key = (prev_vidx, vidx) if (prev_vidx < vidx) else (vidx, prev_vidx)
if edge_key in face_items_usage:
face_invalid_blenpoly.append(True)
break
face_items_usage.add(edge_key)
prev_vidx = vidx
elif use_edges and (line_start == b'l' or context_multi_line == b'l'):
# very similar to the face load function above with some parts removed
if not context_multi_line:
line_split = line_split[1:]
# Instantiate a face
face = create_face(context_material, context_smooth_group, context_object)
face_vert_loc_indices = face[0]
# XXX A bit hackish, we use special 'value' of face_vert_nor_indices (a single True item) to tag this
# as a polyline, and not a regular face...
face[1][:] = [True]
faces.append(face)
# Else, reuse the face_vert_loc_indices defined above for the polyline currently being parsed.
context_multi_line = b'l' if strip_slash(line_split) else b''
for v in line_split:
obj_vert = v.split(b'/')
idx = int(obj_vert[0]) - 1
face_vert_loc_indices.append((idx + len(verts_loc) + 1) if (idx < 0) else idx)
elif line_start == b's':
if use_smooth_groups:
context_smooth_group = line_value(line_split)
if context_smooth_group == b'off':
context_smooth_group = None
elif context_smooth_group: # is not None
unique_smooth_groups[context_smooth_group] = None
elif line_start == b'o':
if use_split_objects:
context_object = line_value(line_split)
# unique_objects[context_object] = None
elif line_start == b'g':
if use_split_groups:
context_object = line_value(line.split())
# print 'context_object', context_object
# unique_objects[context_object] = None
elif use_groups_as_vgroups:
context_vgroup = line_value(line.split())
if context_vgroup and context_vgroup != b'(null)':
vertex_groups.setdefault(context_vgroup, [])
else:
context_vgroup = None # dont assign a vgroup
elif line_start == b'usemtl':
context_material = line_value(line.split())
unique_materials[context_material] = None
elif line_start == b'mtllib': # usemap or usemat
# can have multiple mtllib filenames per line, mtllib can appear more than once,
# so make sure only one occurrence of each material library exists
material_libs |= {os.fsdecode(f) for f in line.split()[1:]}
elif line_start == b'arllib': # armature
# can have multiple arllib filenames per line, arllib can appear more than once
armature_libs |= {os.fsdecode(f) for f in line.split()[1:]}
# Nurbs support
elif line_start == b'cstype':
context_nurbs[b'cstype'] = line_value(line.split()) # 'rat bspline' / 'bspline'
elif line_start == b'curv' or context_multi_line == b'curv':
curv_idx = context_nurbs[b'curv_idx'] = context_nurbs.get(b'curv_idx', [])  # in case we're in a multi-line declaration
if not context_multi_line:
context_nurbs[b'curv_range'] = float_func(line_split[1]), float_func(line_split[2])
line_split[0:3] = [] # remove first 3 items
if strip_slash(line_split):
context_multi_line = b'curv'
else:
context_multi_line = b''
for i in line_split:
vert_loc_index = int(i) - 1
if vert_loc_index < 0:
vert_loc_index = len(verts_loc) + vert_loc_index + 1
curv_idx.append(vert_loc_index)
elif line_start == b'parm' or context_multi_line == b'parm':
if context_multi_line:
context_multi_line = b''
else:
context_parm = line_split[1]
line_split[0:2] = [] # remove first 2
if strip_slash(line_split):
context_multi_line = b'parm'
else:
context_multi_line = b''
if context_parm.lower() == b'u':
context_nurbs.setdefault(b'parm_u', []).extend([float_func(f) for f in line_split])
elif context_parm.lower() == b'v': # surfaces not supported yet
context_nurbs.setdefault(b'parm_v', []).extend([float_func(f) for f in line_split])
# else: # may want to support other parms?
elif line_start == b'deg':
context_nurbs[b'deg'] = [int(i) for i in line.split()[1:]]
elif line_start == b'end':
# Add the nurbs curve
if context_object:
context_nurbs[b'name'] = context_object
nurbs.append(context_nurbs)
context_nurbs = {}
context_parm = b''
''' # How to use usemap? Deprecated?
elif line_start == b'usema': # usemap or usemat
context_image= line_value(line_split)
'''
progress.step("Done, loading materials and images...")
create_materials(filepath, relpath, material_libs, unique_materials,
unique_material_images, use_image_search, float_func)
progress.step("Done, building geometries (verts:%i faces:%i materials: %i smoothgroups:%i) ..." %
(len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups)))
# deselect all
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
scene = context.scene
new_objects = [] # put new objects here
new_armatures = [] # put new armatures here
bone_names = []
create_armatures(filepath, relpath, armature_libs, unique_materials,
unique_material_images, use_image_search, float_func, new_armatures, new_objects, bone_names)
# Split the mesh by objects/materials.
SPLIT_OB_OR_GROUP = bool(use_split_objects or use_split_groups)
for data in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, verts_bw):
verts_loc_split, faces_split, unique_materials_split, dataname, use_vnor, use_vtex, use_vcol, verts_bw_split = data
# Create meshes from the data; warning: 'vertex_groups' won't support splitting
# ~ print(dataname, use_vnor, use_vtex, use_vcol)
create_mesh(new_objects,
use_edges,
verts_loc_split,
verts_nor if use_vnor else [],
verts_tex if use_vtex else [],
verts_col if use_vcol else [],
faces_split,
unique_materials_split,
unique_material_images,
unique_smooth_groups,
vertex_groups,
dataname,
verts_bw_split,
new_armatures,
bone_names,
)
# nurbs support
for context_nurbs in nurbs:
create_nurbs(context_nurbs, verts_loc, new_objects)
for obj in new_armatures:
obj.select_set(state=True)
# we could apply this anywhere before scaling.
# Child objects inherit world_matrix, so only apply it to the parent
parent_obj = obj
while parent_obj.parent is not None:
parent_obj = parent_obj.parent
parent_obj.matrix_world = global_matrix
# Create new obj
for obj in new_objects:
base = scene.objects.link(obj)
base.select_set(state=True)
# we could apply this anywhere before scaling.
# Child objects inherit world_matrix, so only apply it to the parent
parent_obj = obj
while parent_obj.parent is not None:
parent_obj = parent_obj.parent
parent_obj.matrix_world = global_matrix
scene.update()
axis_min = [1000000000] * 3
axis_max = [-1000000000] * 3
if global_clamp_size:
# Get all object bounds
for ob in new_objects:
for v in ob.bound_box:
for axis, value in enumerate(v):
if axis_min[axis] > value:
axis_min[axis] = value
if axis_max[axis] < value:
axis_max[axis] = value
# Scale objects
max_axis = max(axis_max[0] - axis_min[0], axis_max[1] - axis_min[1], axis_max[2] - axis_min[2])
scale = 1.0
while global_clamp_size < max_axis * scale:
scale = scale / 10.0
for obj in new_objects:
obj.scale = scale, scale, scale
progress.leave_substeps("Done.")
progress.leave_substeps("Finished importing: %r" % filepath)
return {'FINISHED'}
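# Illustrative sketch (hypothetical helper, not part of the importer): how the
# parser above resolves OBJ's 1-based and negative (relative) indices into
# 0-based Python list indices, mirroring the
# `(idx + len(...) + 1) if (idx < 0) else idx` pattern used for the
# loc/tex/nor/col index lists.
def _resolve_obj_index_sketch(obj_index, parsed_count):
    # obj_index: raw integer token from the file (1-based; negative values
    # count back from the most recently parsed element).
    idx = obj_index - 1
    return (idx + parsed_count + 1) if idx < 0 else idx
# e.g. with 8 vertices parsed so far:
#   _resolve_obj_index_sketch(1, 8)  -> 0   (first vertex)
#   _resolve_obj_index_sketch(-1, 8) -> 7   (most recent vertex)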
|
feureau/Small-Scripts
|
Blender/Blender config/2.91/scripts/addons/XNALaraMesh/import_obj.py
|
Python
|
gpl-3.0
| 62,932
|
"""
Django settings for skillserve project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_ROOT)
def convert_bool(value, default=False):
return value if isinstance(value, bool) else {
'': default,
'false': False, 'true': True,
'no': False, 'yes': True,
'off': False, 'on': True,
'0': False, '1': True,
}[value.lower()]
def environ_get_bool(key, *, default):
return convert_bool(os.environ.get(key, default), default)
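# Quick illustration (a sketch added for clarity, not part of the original
# settings): how the helpers above interpret typical environment strings.
assert convert_bool('yes') is True
assert convert_bool('OFF') is False
assert convert_bool('', default=True) is True
assert convert_bool(True) is True  # already-bool values pass through unchanged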
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = environ_get_bool('DEBUG', default=True)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = (
os.environ.get('SECRET_KEY', '-8idz@b=osbpt6n^3d%x*z1207u0@!+q8_+atlk@fl!(=b#_^q')
if DEBUG else
os.environ['SECRET_KEY']
)
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '').split()
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'nested_inline',
'rest_framework',
'skillboards.apps.SkillboardsConfig',
'skillstatic.apps.SkillstaticConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'skillserve.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'skillserve.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
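# Sketch of what the merge above does (illustrative note; the URL is
# hypothetical): with DATABASE_URL=postgres://user:pass@host:5432/dbname in
# the environment, dj_database_url.config() returns a dict with
# ENGINE/NAME/USER/PASSWORD/HOST/PORT keys, and update() overlays it on the
# sqlite default; with no DATABASE_URL set, config() returns {} and the
# sqlite default is kept as-is.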
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SECURE_SSL_REDIRECT = environ_get_bool('REDIRECT_SSL', default=False)
CSRF_COOKIE_SECURE = not DEBUG
SESSION_COOKIE_SECURE = not DEBUG
CONN_MAX_AGE = 60
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
WEBPACK_OUTPUT_DIR = os.environ.get(
'WEBPACK_OUTPUT_DIR',
os.path.join(BASE_DIR, 'frontend-dist'))
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
WEBPACK_OUTPUT_DIR
]
|
Lucretiel/SkillServe
|
skillserve/settings.py
|
Python
|
gpl-3.0
| 4,428
|
#!/usr/bin/python
# coding=UTF-8
import smbus
import time
import datetime
import feedparser
import sys
# OpenWeatherMap key...
weatherKey=''
import pyowm
owm = pyowm.OWM(weatherKey)
# Define some device parameters
I2C_ADDR = 0x27 # I2C device address
LCD_WIDTH = 20 # Maximum characters per line
# Define some device constants
LCD_CHR = 1 # Mode - Sending data
LCD_CMD = 0 # Mode - Sending command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
LCD_BACKLIGHT = 0x08 # On
#LCD_BACKLIGHT = 0x00 # Off
ENABLE = 0b00000100 # Enable bit
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#Open I2C interface
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
def lcd_init():
# Initialise display
lcd_byte(0x33,LCD_CMD) # 110011 Initialise
lcd_byte(0x32,LCD_CMD) # 110010 Initialise
lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
# Send byte to data pins
# bits = the data
# mode = 1 for data
# 0 for command
bits_high = mode | (bits & 0xF0) | LCD_BACKLIGHT
bits_low = mode | ((bits<<4) & 0xF0) | LCD_BACKLIGHT
# High bits
bus.write_byte(I2C_ADDR, bits_high)
lcd_toggle_enable(bits_high)
# Low bits
bus.write_byte(I2C_ADDR, bits_low)
lcd_toggle_enable(bits_low)
def lcd_toggle_enable(bits):
# Toggle enable
time.sleep(E_DELAY)
bus.write_byte(I2C_ADDR, (bits | ENABLE))
time.sleep(E_PULSE)
bus.write_byte(I2C_ADDR,(bits & ~ENABLE))
time.sleep(E_DELAY)
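def lcd_byte_pair(bits, mode):
    # Illustrative sketch (hypothetical helper, not used by this script): the
    # two 4-bit transfers lcd_byte() performs for a byte, computed without
    # touching the bus. The HD44780 in 4-bit mode receives the high nibble
    # first, then the low nibble shifted up into D4-D7.
    # e.g. lcd_byte_pair(0x41, LCD_CHR) -> (0x49, 0x19) with backlight on.
    bits_high = mode | (bits & 0xF0) | LCD_BACKLIGHT
    bits_low = mode | ((bits << 4) & 0xF0) | LCD_BACKLIGHT
    return (bits_high, bits_low)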
def lcd_string(message,line):
# Send string to display
message = message.ljust(LCD_WIDTH," ")
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
def left(s, amount):
return s[:amount]
def right(s, amount):
return s[-amount:]
def scrollIt(theText, lineNum):
    # Scrolls text along the given LCD line (1-4) by repeatedly trimming the
    # leading character.
    line_addr = {1: LCD_LINE_1, 2: LCD_LINE_2, 3: LCD_LINE_3, 4: LCD_LINE_4}[lineNum]
    while theText != "":
        lcd_string(theText, line_addr)
        theText = theText[1:]
        time.sleep(0.1)
    lcd_string("", line_addr)
def main():
# Initialise display
lcd_init()
whichHeadline = 0
while True:
theTime = str(datetime.datetime.now().time())
theDate = time.strftime("%d/%m/%Y")
lcd_string(" " + theTime[0:5] + ", " + theDate,LCD_LINE_1)
try:
observation = owm.weather_at_place('Market Deeping,uk')
w = observation.get_weather()
weather = w.get_detailed_status()
if len(weather) > 20:
weather = w.get_status()
weather = weather[0].upper() + weather[1:]
wLength=len(weather)
wLength = (20 - wLength) // 2
weather = (' ' * wLength) + weather
currTemp = w.get_temperature(unit='celsius')
humid = w.get_humidity()
wind = w.get_wind()
windspeed = int(wind['speed'])
sunset = w.get_sunset_time('iso')
sunset = right(sunset,11)
sunset = left(sunset,8)
cloud = w.get_clouds()
lcd_string(weather,LCD_LINE_2)
lcd_string("T:" + str(int(currTemp['temp'])) + "C, " + "W:" + str(windspeed) + "m/s, C:" + str(cloud) + "%",LCD_LINE_3)
# lcd_string("Sunset: " + sunset,LCD_LINE_4)
# Display top 5 headlines from BBC RSS feed.
d = feedparser.parse('http://feeds.bbci.co.uk/news/rss.xml?edition=uk')
headline= d['entries'][whichHeadline]['title']
whichHeadline += 1
if whichHeadline > 4:
whichHeadline = 0
scrollIt("BBC News headlines: " + headline,4)
lcd_string("BBC News headlines:",LCD_LINE_4)
time.sleep(5)
except Exception:
    print "Unexpected error:", sys.exc_info()[0]
lcd_string("Weather unavailable",LCD_LINE_3)
lcd_string("",LCD_LINE_4)
time.sleep(5)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
lcd_byte(0x01, LCD_CMD)
|
stsb11/SB-Pi-TFT
|
lcd_i2c.py
|
Python
|
gpl-3.0
| 4,163
|
"""
handhRL
Hulks and Horrors: The Roguelike
Copyright 2014 by John S. Berry III
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import textwrap
import shelve
import os
import operator
import random
import hhmessage
import libtcodpy as libtcod
import hhtable
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
MAP_WIDTH = 80
MAP_HEIGHT = 43
LIMIT_FPS = 20
ROOM_MAX_SIZE = 10
ROOM_MIN_SIZE = 6
MAX_ROOMS = 30
FOV_ALGO = 0
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 10
BAR_WIDTH = 20
PANEL_HEIGHT = 7
PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT
MSG_X = BAR_WIDTH + 2
MSG_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2
MSG_HEIGHT = PANEL_HEIGHT - 1
INVENTORY_WIDTH = 50
HEAL_AMOUNT = [1, 4]
LIGHTNING_DAMAGE = [2, 12]
LIGHTNING_RANGE = 5
CONFUSE_NUM_TURNS = 10
CONFUSE_RANGE = 8
FIREBALL_DAMAGE = [1, 6]
FIREBALL_RADIUS = 3
LEVEL_UP_BASE = 300
LEVEL_UP_FACTOR = 200
color_dark_wall = libtcod.Color(128, 128, 128)
color_light_wall = libtcod.Color(130, 110, 50)
color_dark_ground = libtcod.Color(192, 192, 192)
color_light_ground = libtcod.Color(200, 180, 50)
class Tile:
# a tile of the map and its properties
def __init__(self, blocked, block_sight=None):
self.blocked = blocked
# all tiles start unexplored
self.explored = False
# by default, if a tile is blocked, it also blocks sight
if block_sight is None:
block_sight = blocked
self.block_sight = block_sight
class Rect:
# a rectangle on the map. used to characterize a room
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
center_x = (self.x1 + self.x2) / 2
center_y = (self.y1 + self.y2) / 2
return center_x, center_y
def intersect(self, other):
# return true if rectangle intersects with another one
return self.x1 <= other.x2 and self.x2 >= other.x1 and self.y1 <= other.y2 and self.y2 >= other.y1
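# Illustrative sketch (hypothetical standalone twin of Rect.intersect): two
# axis-aligned rectangles overlap iff they overlap on both axes; a shared
# border row/column counts, which is what keeps generated rooms from touching.
def _rects_overlap_sketch(a, b):
    # a and b are (x1, y1, x2, y2) tuples.
    ax1, ay1, ax2, ay2 = a
    bx1, by1, bx2, by2 = b
    return ax1 <= bx2 and ax2 >= bx1 and ay1 <= by2 and ay2 >= by1
# _rects_overlap_sketch((0, 0, 4, 4), (4, 4, 8, 8)) -> True (shared corner)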
class Object:
# this is a generic object: the player, a monster, an item, the stairs...
# it's always represented by a character on the screen.
def __init__(self, x, y, char, name, color, blocks=False, always_visible=False, fighter=None, ai=None, item=None,
equipment=None, placeable=None, seen_player=False, killed_by=None):
self.x = x
self.y = y
self.char = char
self.name = name
self.color = color
self.blocks = blocks
self.always_visible = always_visible
self.fighter = fighter
if self.fighter:
self.fighter.owner = self
self.ai = ai
if self.ai:
self.ai.owner = self
self.item = item
if self.item:
self.item.owner = self
self.equipment = equipment
if self.equipment: # let the equipment component know who owns it
self.equipment.owner = self
# there must be an item component for the equipment component to work properly
self.item = Item()
self.item.owner = self
self.placeable = placeable
if self.placeable:
self.placeable.owner = self
self.seen_player = seen_player
self.killed_by = killed_by
def move(self, dx, dy):
# move by the given amount
if not is_blocked(self.x + dx, self.y + dy):
self.x += dx
self.y += dy
def draw(self):
# set the color and then draw the character that represents this object at its position
if libtcod.map_is_in_fov(fov_map, self.x, self.y) or (self.always_visible and map[self.x][self.y].explored):
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)
def clear(self):
# erase the character that represents this object
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
def move_towards(self, target_x, target_y):
# create and compute a path for the object to the target
path = libtcod.path_new_using_map(fov_map)
libtcod.path_compute(path, self.x, self.y, target_x, target_y)
# get the target coords of the next spot on the path
mx, my = libtcod.path_walk(path, True)
if mx is not None:
dx = mx - self.x
dy = my - self.y
self.move(dx, dy)
libtcod.path_delete(path)
else:
libtcod.path_delete(path)
return
def distance(self, x, y):
# return the distance to some coordinates
return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)
def distance_to(self, other):
# return the distance to another object
dx = other.x - self.x
dy = other.y - self.y
return math.sqrt(dx ** 2 + dy ** 2)
def send_to_back(self):
# make this object be drawn first, so all others appear above it if they're in the same tile
global objects
objects.remove(self)
objects.insert(0, self)
class Item:
# an item that can be picked up and used.
def __init__(self, reusable=False, uses=1, use_function=None):
self.use_function = use_function
self.reusable = reusable
self.uses = uses
if self.use_function:
self.use_function.owner = self
def use(self, *args):
# just call the use_function if it is defined
if self.use_function is None and not self.owner.equipment:
message('The ' + self.owner.name + ' cannot be used.')
elif self.owner.equipment:
# special case: if object has equipment component, the use option is to equip/dequip
self.owner.equipment.toggle_equip()
return
elif not self.reusable:
if self.use_function.use(*args) != 'cancelled':
inventory.remove(self.owner) # destroy after use unless cancelled
else:
if self.use_function.use(*args) != 'cancelled':
self.uses -= 1
if self.uses < 1:
inventory.remove(self.owner)
def pick_up(self):
# add to the player's inventory and remove from the map
if len(inventory) >= 26:
message('Your inventory is full, cannot pick up ' + self.owner.name + '.', libtcod.red)
else:
inventory.append(self.owner)
objects.remove(self.owner)
message('You picked up a ' + self.owner.name + '!', libtcod.green)
# special case: automatically equip, if corresponding slot is unused
equipment = self.owner.equipment
if equipment and get_equipped_in_slot(equipment.slot) is None:
equipment.equip()
def drop(self):
# special case: if equipped item, remove before dropping
if self.owner.equipment:
self.owner.equipment.dequip()
# add to the map and remove from inventory. also, place at player coords
objects.append(self.owner)
inventory.remove(self.owner)
self.owner.x = player.x
self.owner.y = player.y
message('You dropped a ' + self.owner.name + '.', libtcod.yellow)
class Equipment:
# an object that can be equipped, yielding bonuses. automatically adds the item component.
def __init__(self, slot, to_hit_bonus=0, damage_bonus=0, damage_roll=None, armor_bonus=0, max_hp_bonus=0,
ranged=False, ammo=None):
self.to_hit_bonus = to_hit_bonus
self.damage_bonus = damage_bonus
self.damage_roll = damage_roll
self.armor_bonus = armor_bonus
self.max_hp_bonus = max_hp_bonus
self.slot = slot
self.is_equipped = False
self.ranged = ranged
self.ammo = ammo
def toggle_equip(self): # toggle equip/dequip state
if self.is_equipped:
self.dequip()
else:
self.equip()
def equip(self):
# if the slot is already being used, dequip whatever is there first
old_equipment = get_equipped_in_slot(self.slot)
if old_equipment is not None:
old_equipment.dequip()
# equip an object and show a message about it
self.is_equipped = True
message('Equipped ' + self.owner.name + ' on ' + self.slot + '.', libtcod.light_green)
def dequip(self):
# dequip object and show a message about it.
if not self.is_equipped:
return
self.is_equipped = False
message('Dequipped ' + self.owner.name + ' from ' + self.slot + '.', libtcod.light_yellow)
class Placeable:
# a class for 'placeables', interactive world objects that may be usable.
def __init__(self, reusable=False, used=False, use_class=None):
self.reusable = reusable
self.used = used
self.use_class = use_class
if self.use_class:
self.use_class.owner = self
def use(self, *args):
# interact with the object
# just call the use_class if it is defined; return early when it isn't,
# so we never call .use() on None below
if self.use_class is None:
    message('The ' + self.owner.name + ' cannot be used.')
    return
if self.used and not self.reusable:
message('You have already used that object!')
else:
if self.use_class.use(*args) != 'cancelled':
self.used = True # disable after use unless cancelled
class Fighter:
# combat-related properties and methods (monster, player, npc)
def __init__(self, hp, armor_class, to_hit, damage, damage_roll, xp, damage_resistance=0,
kills=0, death_function=None):
self.base_max_hp = hp
self.hp = hp
self.base_armor_class = armor_class
self.base_to_hit = to_hit
self.base_damage = damage
self.base_roll = damage_roll
self.xp = xp
self.damage_resistance = damage_resistance
self.kills = kills
self.death_function = death_function
@property
def to_hit(self):
bonus = sum(equipment.to_hit_bonus for equipment in get_all_equipped(self.owner))
return self.base_to_hit + bonus
@property
def armor_class(self): # return actual defense, by summing up the bonuses from all equipped items
bonus = sum(equipment.armor_bonus for equipment in get_all_equipped(self.owner))
if bonus < -12:
bonus = -12
return self.base_armor_class + bonus
@property
def damage(self): # return actual damage bonus, plus any special bonuses
bonus = sum(equipment.damage_bonus for equipment in get_all_equipped(self.owner))
return self.base_damage + bonus
@property
def damage_roll(self): # return current damage roll or roll from equipment
for equipment in get_all_equipped(self.owner):
if equipment.damage_roll:
return equipment.damage_roll
return self.base_roll
@property
def max_hp(self): # return actual max_hp, by summing up the bonuses from all equipped items
bonus = sum(equipment.max_hp_bonus for equipment in get_all_equipped(self.owner))
return self.base_max_hp + bonus
def take_damage(self, damage, killed_by):
# apply damage if possible
if damage > 0:
self.hp -= damage
# check for death. if there's a death function, call it, and update 'killed_by' to name of attacker
if self.hp <= 0:
function = self.death_function
if function is not None:
self.owner.killed_by = killed_by
function(self.owner)
if self.owner != player: # yield xp to player
player.fighter.xp += self.xp
player.fighter.kills += 1
def heal(self, amount):
# heal by the given amount, without going over max_hp
self.hp += amount
if self.hp > self.max_hp:
self.hp = self.max_hp
def attack(self, target):
# first compute the to-hit target number, clamped to the 2..20 range
to_hit_target = self.to_hit + target.fighter.armor_class + 5
if to_hit_target > 20:
    to_hit_target = 20
elif to_hit_target < 2:
    to_hit_target = 2
# check if the attacker is wielding a gun
has_gun = False
for i in get_all_equipped(self.owner):
if i.is_equipped and i.ranged:
has_gun = True
gun = i
# check if gun has ammo
if has_gun:
if gun.ammo > 0:
gun.ammo -= 1
else:
message("You don't have any ammo!")
return
# use the right pronoun
if target.ai is not None:
pronoun = 'the '
else:
pronoun = ''
# roll to hit
if hhtable.rolldice(1, 20) >= to_hit_target:
message(self.owner.name.title() + ' misses ' + pronoun + target.name + '.')
return
# now roll for damage (curr. using OD&D style)
damage = (hhtable.rolldice(*self.damage_roll) + self.damage) - target.fighter.damage_resistance
if damage > 0:
# make the target take some damage
message(self.owner.name.title() + ' hits ' + pronoun + target.name + ' for ' + str(damage) + ' hit points.',
libtcod.yellow)
target.fighter.take_damage(damage, self.owner.name)
else:
message(self.owner.name.title() + ' hits ' + pronoun + target.name + ' but it has no effect!',
libtcod.grey)
def shoot(self):
# first check if the character is equipped with a ranged weapon
has_gun = False
for i in get_all_equipped(self.owner):
if i.is_equipped and i.ranged:
has_gun = True
gun = i
if not has_gun:
message("You're not carrying a gun!", libtcod.red)
return
# check if the gun has ammo
if gun.ammo is None or gun.ammo < 1:
message("You're out of ammo in that gun!", libtcod.red)
return
# target a monster
message('Left-click on a target monster, or right-click to cancel.')
target = target_monster()
if not target:
return
# calculate to-hit
to_hit_target = self.to_hit + target.fighter.armor_class + 5
if to_hit_target > 20:
    to_hit_target = 20
elif to_hit_target < 2:
    to_hit_target = 2
# deduct ammo
gun.ammo -= 1
# roll to hit
if hhtable.rolldice(1, 20) >= to_hit_target:
message(self.owner.name.title() + ' misses the ' + target.name + '.')
return
# now roll for damage (curr. using OD&D style)
damage = (hhtable.rolldice(*self.damage_roll) + gun.damage_bonus) - target.fighter.damage_resistance
if damage > 0:
# make the target take some damage
message(self.owner.name.title() + ' hits the ' + target.name + ' for ' + str(damage) + ' hit points.',
libtcod.yellow)
target.fighter.take_damage(damage, self.owner.name)
else:
message(self.owner.name.title() + ' hits the ' + target.name + ' but it has no effect!',
libtcod.grey)
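# Illustrative sketch (hypothetical helper): the clamped target number shared
# by attack() and shoot(). The game rolls 1d20 and *misses* on rolls at or
# above this number, so a lower armor_class (better armor) means fewer hits land.
def _to_hit_target_sketch(to_hit, armor_class):
    # e.g. _to_hit_target_sketch(1, 10) -> 16: rolls of 16+ miss, so the
    # attack lands on 1-15, i.e. 75% of the time.
    return max(2, min(20, to_hit + armor_class + 5))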
class BasicMonster:
# AI for a basic monster
def __init__(self):
pass
def take_turn(self):
# a basic monster takes its turn. If you can see it, it can see you
monster = self.owner
if libtcod.map_is_in_fov(fov_map, monster.x, monster.y):
monster.seen_player = True
if monster.seen_player:
# move towards the player if far away
if 2 <= monster.distance_to(player) <= 10:
monster.move_towards(player.x, player.y)
# close enough, attack!
elif player.fighter.hp > 0:
monster.fighter.attack(player)
class FriendlyMonster:
def __init__(self, max_range=10):
self.max_range = max_range
def take_turn(self):
# a monster that protects the player and attacks other monsters
monster = self.owner
enemy = closest_monster(self.max_range, (monster, player))
if enemy is not None:
message(self.owner.name + ' is targeting ' + enemy.name)
if 2 <= monster.distance_to(enemy) <= self.max_range:
monster.move_towards(enemy.x, enemy.y)
elif enemy.fighter.hp > 0:
monster.fighter.attack(enemy)
else:
monster.move_towards(player.x, player.y)
class ConfusedMonster:
# AI for a temporarily confused monster (reverts to normal AI after a while)
def __init__(self, old_ai, num_turns=CONFUSE_NUM_TURNS):
self.old_ai = old_ai
self.num_turns = num_turns
def take_turn(self):
if self.num_turns > 0: # still confused
# move in random direction and decrease confuse duration
self.owner.move(libtcod.random_get_int(0, -1, 1), libtcod.random_get_int(0, -1, 1))
self.num_turns -= 1
else: # restore previous AI
self.owner.ai = self.old_ai
message('The ' + self.owner.name + ' is no longer confused!', libtcod.red)
class Heal:
# generic process for healing items
def __init__(self, dice=HEAL_AMOUNT, max_boost=False, heal_all=False):
self.dice = dice
self.max_boost = max_boost
self.heal_all = heal_all
def use(self):
# heal the player
if player.fighter.hp == player.fighter.max_hp:
message('You are already at full health.', libtcod.red)
return 'cancelled'
if self.heal_all:
heal_roll = player.fighter.max_hp
else:
heal_roll = hhtable.rolldice(*self.dice)
message('Your pain subsides, for now. You restore ' + str(heal_roll) + ' hit points.', libtcod.light_violet)
player.fighter.heal(heal_roll)
class Buff:
# generic process for items which permanently improve stats
def __init__(self, max_hp=0, to_hit=0, damage=0, ac=0, xp=0, dr=0, desc=None):
self.max_hp = max_hp
self.to_hit = to_hit
self.damage = damage
self.ac = ac
self.xp = xp
self.dr = dr
self.desc = desc
def use(self):
# apply all bonuses from the item
player.fighter.base_max_hp += self.max_hp
player.fighter.base_to_hit += self.to_hit
player.fighter.base_damage += self.damage
player.fighter.base_armor_class += self.ac
player.fighter.xp += self.xp
player.fighter.damage_resistance += self.dr
if self.desc is None:
message('A rush flows through you, and you feel improved!')
else:
message(self.desc)
class RandomDamage:
# generic process for items that damage a random target
def __init__(self, damage=LIGHTNING_DAMAGE, attack_range=LIGHTNING_RANGE):
self.damage = damage
self.attack_range = attack_range
def use(self):
# find closest enemy inside max range and damage it
monster = closest_monster(self.attack_range, [player])
if monster is None: # no enemy found within range
message('No enemy is within arc range.')
return 'cancelled'
# zap it!
damage = hhtable.rolldice(*self.damage)
message('A bolt of electricity arcs into the ' + monster.name + ' with a loud ZZZAP! The damage is ' + str(
damage) + ' hit points.', libtcod.light_blue)
monster.fighter.take_damage(damage, 'electrical discharge')
class Grenade:
# generic grenade throw function
def __init__(self, damage=FIREBALL_DAMAGE, radius=FIREBALL_RADIUS, radius_damage=FIREBALL_DAMAGE, kills=False,
kills_radius=False):
self.damage = damage
self.radius = radius
self.radius_damage = radius_damage
self.kills = kills
self.kills_radius = kills_radius
def use(self):
# ask the player for a target tile to throw a 'fireball' at (i.e. grenade, AoE, etc.)
message('Left-click a target tile, or right-click to cancel.', libtcod.light_cyan)
(x, y) = target_tile()
if x is None:
return 'cancelled'
message('The device explodes, striking everything within ' + str(self.radius) + ' tiles!', libtcod.orange)
for obj in objects: # damage every fighter in range, including the player
if obj.distance(x, y) == 0 and obj.fighter:
if not self.kills:
damage_rolled = hhtable.rolldice(*self.damage)
else:
damage_rolled = obj.fighter.hp
message(obj.name.capitalize() + ' is at ground zero! Takes ' + str(damage_rolled) + ' hit points.',
libtcod.orange)
obj.fighter.take_damage(damage_rolled, 'own grenade')
elif obj.distance(x, y) <= self.radius and obj.fighter:
if not self.kills_radius:
damage_rolled = hhtable.rolldice(*self.radius_damage)
else:
damage_rolled = obj.fighter.hp
message(obj.name.capitalize() + ' takes blast damage for ' + str(damage_rolled) + ' hit points.',
libtcod.orange)
obj.fighter.take_damage(damage_rolled, 'own grenade')
class Confuse:
# generic class for confusion items
def __init__(self, duration=CONFUSE_NUM_TURNS, attackrange=CONFUSE_RANGE):
self.duration = duration
self.attackrange = attackrange
def use(self):
# ask for target and confuse it
message('Left-click an enemy to confuse it, or right-click to cancel.', libtcod.light_cyan)
monster = target_monster(self.attackrange)
if monster is None:
return 'cancelled'
old_ai = monster.ai
monster.ai = ConfusedMonster(old_ai, num_turns=self.duration)
monster.ai.owner = monster # tell the new component who owns it
message('The eyes of the ' + monster.name + ' look vacant, as he starts to stumble around!', libtcod.light_green)
class Detector:
# generic class for a device that detects monster presences
def __init__(self, detect_range=None):
self.detect_range = detect_range
def use(self):
# flag all monsters within range as always_visible (or all monsters on map if detect_range=None)
message('The machine goes "Ping!"')
for obj in objects:
if obj.fighter and self.detect_range is None:
obj.always_visible = True
elif obj.fighter and obj.distance(player.x, player.y) <= self.detect_range:
obj.always_visible = True
class Summon:
# summon a friendly monster
def __init__(self, name, hitdice, color):
self.name = name
self.hitdice = hitdice
self.color = color
def use(self):
x = player.x
y = player.y
summon = get_monster_from_hitdice(x, y, self.name, self.hitdice, self.color, friendly=True)
objects.append(summon)
class Terminal:
def __init__(self, type=None):
self.type = type
if self.type is None:
self.type = random.choice(['log','hint'])
def use(self):
# get a random creepy message
if self.type == 'log':
hhmessage.creep_log()
if self.type == 'hint':
hhmessage.hint_message()
class RestPod:
def __init__(self, heal_amount=(1, 6), heal_bonus=0):
self.heal_bonus = heal_bonus
self.heal_amount = heal_amount
def use(self):
# heal the player
if player.fighter.hp == player.fighter.max_hp:
message('You are already at full health.', libtcod.red)
return 'cancelled'
heal_roll = hhtable.rolldice(*self.heal_amount) + self.heal_bonus
message('You relax inside the metal cocoon. You restore ' + str(heal_roll) + ' hit points.',
libtcod.light_violet)
player.fighter.heal(heal_roll)
class Teleporter:
def __init__(self, new_level=None):
self.new_level = new_level
if self.new_level is None:
self.new_level = libtcod.random_get_int(0, 1, 12)
def use(self):
global dungeon_level
message('You feel a sudden jolt and find yourself staring at a completely different room.', libtcod.red)
dungeon_level = self.new_level
make_map()
initialize_fov()
def main_menu(firstrun=False):
# The main game menu.
img = hhmessage.generate_starpic()
while not libtcod.console_is_window_closed():
# show the background image, at twice the regular resolution
libtcod.image_blit_2x(img, 0, 0, 0)
# show the game title and credits!
libtcod.console_set_default_foreground(0, libtcod.light_yellow)
libtcod.console_print_ex(0, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 - 4, libtcod.BKGND_NONE, libtcod.CENTER,
'HULKS AND HORRORS\nThe Roguelike')
libtcod.console_print_ex(0, SCREEN_WIDTH / 2, SCREEN_HEIGHT - 2, libtcod.BKGND_NONE, libtcod.CENTER,
'(c) 2014 by John \'jarcane\' Berry')
# Change menu options to match state of 'savegame'
if os.path.isfile('savegame'):
newopt = 'Overwrite current save'
else:
newopt = 'Play a new game'
# show options and wait for the player's choice
choice = menu('', [newopt, 'Continue last save', 'Display high scores', 'Quit'], 26)
if choice == 0:
new_game(firstrun)
firstrun = False
play_game()
elif choice == 1:
try:
load_game()
except Exception:
msgbox('\n No saved game to load.\n', 24)
continue
play_game()
elif choice == 2:
try:
show_scores()
except Exception:
msgbox('\n No high scores yet!\n', 24)
continue
elif choice == 3:
break
def new_game(firstrun=False):
global player, inventory, game_msgs, game_state, dungeon_level
# play intro sequence if starting up
if firstrun:
hhmessage.intro_sequence()
# create Player object
# Assume Soldier class with 10 STR, 10 DEX, 10 CON
fighter_component = Fighter(hp=hhtable.rolldice(3, 6) + hhtable.rolldice(1, 10),
armor_class=10, to_hit=1, damage=1,
damage_roll=[1, 3],
xp=0, death_function=player_death)
player = Object(0, 0, chr(1), get_text_entry('What is your name, Ensign?', hhmessage.generate_screen()),
libtcod.white, blocks=True, fighter=fighter_component)
player.level = 1
# generate map
dungeon_level = 1
make_map()
initialize_fov()
game_state = 'playing'
inventory = []
# create the list of game messages and their colors, starts empty
game_msgs = []
# a warm welcoming message!
message('You awaken in a damp cave beneath the surface of Gamma Crionis IV. The ground rumbles beneath you.',
libtcod.red)
# initial equipment: a knife
equipment_component = Equipment(slot='right hand', damage_roll=[1, 4])
obj = Object(0, 0, '-', 'combat knife', libtcod.sky, equipment=equipment_component)
inventory.append(obj)
equipment_component.equip()
obj.always_visible = True
def initialize_fov():
global fov_recompute, fov_map
fov_recompute = True
libtcod.console_clear(con) # unexplored areas start black
# create the FOV map according to the generated map
fov_map = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
libtcod.map_set_properties(fov_map, x, y, not map[x][y].block_sight, not map[x][y].blocked)
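# Note on the call above: libtcod's map_set_properties signature is
# (map, x, y, is_transparent, is_walkable), hence the double negation:
# a sight-blocking tile is not transparent, and a blocked tile is not walkable.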
def play_game():
player_action = None
mouse = libtcod.Mouse()
key = libtcod.Key()
while not libtcod.console_is_window_closed():
# render the screen
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
render_all()
libtcod.console_flush()
check_level_up()
# erase all objects at old locations before they move
for object in objects:
object.clear()
# handle keys and exit game if needed
player_action = handle_keys(key, mouse)
if game_state == 'dead':
    # death is permanent: drop the save (if any) and leave the main loop
    try:
        os.remove('savegame')
    except OSError:
        pass
    break
elif player_action == 'exit':
save_game()
break
# let monsters take their turn
if game_state == 'playing' and player_action != 'didnt-take-turn':
for object in objects:
if object.ai:
object.ai.take_turn()
def save_game():
# open a new empty shelve (possibly rewriting old one) to write the game data
file = shelve.open('savegame', 'n')
file['map'] = map
file['objects'] = objects
file['player_index'] = objects.index(player)
file['inventory'] = inventory
file['game_msgs'] = game_msgs
file['game_state'] = game_state
file['stairs_index'] = objects.index(stairs)
file['dungeon_level'] = dungeon_level
file.close()
def load_game():
# open the previous saved shelve and load the game data
global map, objects, player, inventory, game_msgs, game_state, stairs, dungeon_level
file = shelve.open('savegame', 'r')
map = file['map']
objects = file['objects']
player = objects[file['player_index']] # get index of player in objects list and access it
inventory = file['inventory']
game_msgs = file['game_msgs']
game_state = file['game_state']
stairs = objects[file['stairs_index']]
dungeon_level = file['dungeon_level']
file.close()
initialize_fov()
def new_score(player):
# generate a new score from player and dungeon_level, save it to file, then ask to display it.
score = player.fighter.kills * player.level * dungeon_level
score_data = [score, player.name.title(), player.killed_by, str(dungeon_level)]
scores = shelve.open('scorefile', 'c', writeback=True)
if 'scores' in scores:
    all_scores = scores['scores']
    all_scores.append(score_data)
    scores['scores'] = all_scores
else:
    scores['scores'] = [score_data]
scores.close()
choice = menu('Game Over\n', ['See your score', 'Return to main menu'], 22)
if choice == 0:
show_scores()
def show_scores():
# load the score file, sort the list by score, then display
score_file = shelve.open('scorefile', 'r')
scores = score_file['scores']
scores.sort(key=operator.itemgetter(0), reverse=True)
score_list = ['High Scores']
for c, entry in enumerate(scores[:10]):
    n_score = '{0: >3}'.format(str(c + 1)) + '. ' + '{0: >5}'.format(str(entry[0])) + ' ' + entry[1]
    n_score += ', killed by ' + entry[2] + ' on level ' + entry[3]
    score_list.append(n_score)
score_file.close()
hhmessage.show_text_log(score_list, hhmessage.generate_starpic(), delay=False, center_first_line=True)
def end_game():
ending = [
'*INITIATE COMM SEQUENCE EMERGENCY ALPHA-0x1*',
'Calling Guild Post Alpha Ceti.',
'Come in Guild Post Alpha Ceti.',
'This is the last survivor of the Ark-1.',
'Requesting immediate evacuation.',
'Please respond.',
'Can anyone hear me?',
'... Is there anybody out there?',
'...',
'*silence*'
]
hhmessage.show_text_log(ending, hhmessage.generate_starpic())
os.remove('savegame')
main_menu()
def handle_keys(key, mouse):
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
return 'exit' # exit game
# movement keys
if game_state == 'playing':
if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
player_move_or_attack(0, -1)
elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
player_move_or_attack(0, 1)
elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
player_move_or_attack(-1, 0)
elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
player_move_or_attack(1, 0)
elif key.vk == libtcod.KEY_HOME or key.vk == libtcod.KEY_KP7:
player_move_or_attack(-1, -1)
elif key.vk == libtcod.KEY_PAGEUP or key.vk == libtcod.KEY_KP9:
player_move_or_attack(1, -1)
elif key.vk == libtcod.KEY_END or key.vk == libtcod.KEY_KP1:
player_move_or_attack(-1, 1)
elif key.vk == libtcod.KEY_PAGEDOWN or key.vk == libtcod.KEY_KP3:
player_move_or_attack(1, 1)
elif key.vk == libtcod.KEY_KP5 or key.vk == libtcod.KEY_SPACE:
pass # do nothing ie wait for the monster to come to you
else:
# test for other keys
key_char = chr(key.c)
if key_char == 'a':
has_gun = False
for i in get_all_equipped(player):
if i.is_equipped and i.ranged:
has_gun = True
gun = i
if has_gun:
message(gun.owner.name.capitalize() + ' has ' + str(gun.ammo) + ' shots remaining.')
if key_char == 's':
# shoot at someone
player.fighter.shoot()
# remove the target from the map until the next redraw
for object in objects:
object.clear()
return
if key_char == 'g':
# pick up an item
for object in objects:
if object.x == player.x and object.y == player.y and object.item:
object.item.pick_up()
break
if key_char == 'u':
# use a placeable if present
for object in objects:
if object.x == player.x and object.y == player.y and object.placeable:
object.placeable.use()
break
if key_char == 'i':
# show the inventory
chosen_item = inventory_menu('Press the key next to an item to use it, or any other to cancel.\n')
if chosen_item is not None:
chosen_item.use()
if key_char == 'd':
# show inventory, if an item is selected, drop it
chosen_item = inventory_menu('Press the key next to an item to drop it, or any other to cancel.\n')
if chosen_item is not None:
chosen_item.drop()
if key_char == '<':
# go down stairs, if the player is on them
if stairs.x == player.x and stairs.y == player.y:
next_level()
if key_char == 'c':
# show character information
level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
try:
    highest = 'H' + str(player.fighter.damage_roll[2])
except IndexError:
    highest = ''
hhmessage.show_text_log([
'Character Information',
'Name: ' + player.name,
'Level: ' + str(player.level),
'Experience: ' + str(player.fighter.xp),
'Experience to level up: ' + str(level_up_xp),
'Maximum HP: ' + str(player.fighter.max_hp),
'AC: ' + str(player.fighter.armor_class),
'DR: ' + str(player.fighter.damage_resistance),
'To-hit: +' + str(player.fighter.to_hit),
'Damage Bonus: +' + str(player.fighter.damage),
'Damage Roll: ' + str(player.fighter.damage_roll[0]) + 'd' + str(
player.fighter.damage_roll[1]) + highest,
], hhmessage.generate_screen(), delay=False)
if key_char == 'h' or key_char == '?':
hhmessage.help_screen()
return 'didnt-take-turn'
def target_tile(max_range=None):
# return the position of a tile left-clicked in player FOV (optionally in a range)
# or return (None,None) if right clicked
key = libtcod.Key()
mouse = libtcod.Mouse()
while True:
# render the screen. this raises the inventory and shows the names of objects under the mouse
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
render_all()
(x, y) = (mouse.cx, mouse.cy)
if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and (
max_range is None or player.distance(x, y) <= max_range)):
return x, y
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return None, None # cancel on ESC or right clicked
def target_monster(max_range=None):
# returns a clicked monster inside FOV up to a range, or None if right-clicked
while True:
(x, y) = target_tile(max_range)
if x is None: # player cancelled
return None
# return first clicked monster, otherwise keep looping
for obj in objects:
if obj.x == x and obj.y == y and obj.fighter and obj != player:
return obj
def get_names_under_mouse():
key = libtcod.Key()
mouse = libtcod.Mouse()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
# return a string with the names of all objects under the mouse
(x, y) = (mouse.cx, mouse.cy)
# create a list with the names of all objects at the mouse's coordinates within FOV
names = [obj.name for obj in objects
if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y)]
if names:
names = ', '.join(names)  # join the names, separated by commas
names = 'Under mouse: ' + names
else:
names = ''
return names.title()
def get_names_under_player():
names = [obj.name for obj in objects
if obj.x == player.x and obj.y == player.y and obj.name != player.name]
if names:
names = ', '.join(names)  # join the names, separated by commas
names = 'Under player: ' + names
else:
names = ''
return names.title()
def get_text_entry(header, img):
timer = 0
command = ""
cursor = 0
x = SCREEN_HEIGHT / 3
y = (SCREEN_HEIGHT / 4) + 2
libtcod.image_blit_2x(img, 0, 0, 0)
libtcod.console_set_default_foreground(0, libtcod.green)
libtcod.console_print_ex(0, SCREEN_WIDTH / 4, SCREEN_HEIGHT / 4, libtcod.BKGND_NONE, libtcod.LEFT, header)
while not libtcod.console_is_window_closed():
key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED)
timer += 1
if timer % (LIMIT_FPS // 4) == 0:
if timer % (LIMIT_FPS // 2) == 0:
timer = 0
libtcod.console_set_char(0, cursor + x, y, "_")
libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white)
else:
libtcod.console_set_char(0, cursor + x, y, " ")
libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white)
if key.vk == libtcod.KEY_BACKSPACE and cursor > 0:
libtcod.console_set_char(0, cursor + x, y, " ")
libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white)
command = command[:-1]
cursor -= 1
elif key.vk == libtcod.KEY_ENTER:
break
elif key.vk == libtcod.KEY_ESCAPE:
command = ""
break
elif key.c > 0:
letter = chr(key.c)
libtcod.console_set_char(0, cursor + x, y, letter) # print new character at appropriate position on screen
libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white) # make it white or something
command += letter # add to the string
cursor += 1
libtcod.console_flush()
return command
def player_move_or_attack(dx, dy):
global fov_recompute
global objects
# the coordinates the player is moving to/attacking
x = player.x + dx
y = player.y + dy
# try to find an attackable object there
target = None
for object in objects:
friendly = isinstance(object.ai, FriendlyMonster)
if object.fighter and not friendly and object.x == x and object.y == y:
target = object
break
# attack if target found, move otherwise
if target is not None:
player.fighter.attack(target)
else:
player.move(dx, dy)
fov_recompute = True
def create_room(room):
global map
# go through the tiles in the rectangle and make them passable
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
map[x][y].blocked = False
map[x][y].block_sight = False
def random_choice(chances_dict):
# choose one option from dictionary of chances, returning its key
chances = chances_dict.values()
strings = chances_dict.keys()
return strings[random_choice_index(chances)]
def random_choice_index(chances): # choose one option from a list of chances and return its index
dice = libtcod.random_get_int(0, 1, sum(chances))
# go through all chances, keep sum so far
running_sum = 0
choice = 0
for w in chances:
running_sum += w
# see if the dice landed in the part that corresponds to this choice
if dice <= running_sum:
return choice
choice += 1
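# Illustrative sketch (hypothetical pure-Python twin of random_choice_index
# for a fixed roll): with chances=[3, 1] the 1d4 roll lands in the 1-3 band
# for index 0 and on 4 for index 1, i.e. a 75%/25% weighted split.
def _weighted_index_sketch(chances, roll):
    running_sum = 0
    for choice, w in enumerate(chances):
        running_sum += w
        if roll <= running_sum:
            return choice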
def from_dungeon_level(table):
# returns a value that depends on level. the table specifies what value occurs after each level, default is 0
for (value, level) in reversed(table):
if dungeon_level >= level:
return value
return 0
def from_player_level(table):
# returns a value dependent on level. Table specifies what value occurs after each level, default is 0
for (value, level) in reversed(table):
if player.level >= level:
return value
return 0
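# Illustrative sketch (hypothetical twin of the two lookups above, with the
# level passed explicitly): the last [value, level] entry whose level has been
# reached wins, e.g. _from_level_sketch([[2, 1], [3, 4], [4, 6]], 5) -> 3.
def _from_level_sketch(table, level):
    for (value, lvl) in reversed(table):
        if level >= lvl:
            return value
    return 0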
def get_monster_from_hitdice(x, y, name, hitdice, color, friendly=False):
# generate monster object from number of hit dice
# get tuple components
num = hitdice[0]
sides = hitdice[1]
# determine to-hit from num and sides
if sides == 12:
to_hit = num
elif 11 >= sides >= 8:
to_hit = num / 2
else:
to_hit = num / 3
# if sides >= 10, make letter a capital
if sides >= 10:
letter = name[0].capitalize()
else:
letter = name[0]
# get number of damage dice from hitdice, making sure it's at least 1.
if num / 2 < 1:
roll = (1, sides)
else:
roll = (num / 2, sides)
fighter_component = Fighter(hp=hhtable.rolldice(*hitdice), armor_class=10 - num, to_hit=to_hit,
damage=0, damage_roll=roll, xp=num * sides * 5, death_function=monster_death)
if friendly:
ai_component = FriendlyMonster()
else:
ai_component = BasicMonster()
monster = Object(x, y, letter, name, color, blocks=(not friendly), fighter=fighter_component, ai=ai_component)
return monster
def get_item(x, y):
choice = random.choice(['heal', 'grenade', 'misc'])
if choice == 'heal':
# create a healing item
heal_item = hhtable.make_heal_item()
heal_component = Heal(dice=heal_item['roll'], heal_all=heal_item['heal_all'])
item_component = Item(reusable=heal_item['reuse'], uses=heal_item['uses'], use_function=heal_component)
item = Object(x, y, '!', heal_item['name'], libtcod.violet, item=item_component)
elif choice == 'grenade':
# create a grenade
grenade = hhtable.make_grenade()
grenade_component = Grenade(damage=grenade['damage'], radius=grenade['radius'],
radius_damage=grenade['radius_damage'], kills=grenade['kills'],
kills_radius=grenade['kills_radius'])
item_component = Item(use_function=grenade_component)
item = Object(x, y, '*', grenade['name'], libtcod.light_yellow, item=item_component)
elif choice == 'misc':
subchoice = random.choice(['confuse', 'buff', 'random_damage', 'detector', 'summon', 'vector'])
if subchoice == 'random_damage':
# create an arc lightning device
random_damage_component = RandomDamage()
item_component = Item(use_function=random_damage_component)
item = Object(x, y, '#', 'Tesla arc device', libtcod.light_yellow, item=item_component)
elif subchoice == 'confuse':
# create a confuse item
confuse_component = Confuse()
item_component = Item(use_function=confuse_component)
item = Object(x, y, '#', 'neural scrambler', libtcod.light_yellow, item=item_component)
elif subchoice == 'buff':
# create a buff item
buff = hhtable.make_buff()
buff_component = Buff(*buff['args'])
item_component = Item(use_function=buff_component)
item = Object(x, y, chr(167), buff['name'], libtcod.dark_magenta, item=item_component)
elif subchoice == 'detector':
# create a motion tracker
detector_component = Detector(detect_range=10)
item_component = Item(reusable=True, uses=hhtable.rolldice(1, 3), use_function=detector_component)
item = Object(x, y, '#', 'motion tracker', libtcod.light_yellow, item=item_component)
elif subchoice == 'summon':
# create a friendly summonable monster
summon_component = Summon(name='TED-3', hitdice=(4, 6), color=libtcod.sepia)
item_component = Item(use_function=summon_component)
item = Object(x, y, chr(12), 'TED-3', libtcod.sepia, item=item_component)
elif subchoice == 'vector':
# create the vector-jet harness
harness = Equipment('back',armor_bonus=-1)
item = Object(x, y, '%', 'vector-jet harness', libtcod.black, equipment=harness)
return item
def get_weapon(x, y):
weapon = hhtable.make_weapon()
equipment_component = Equipment(slot='right hand', damage_roll=weapon['damage'], to_hit_bonus=weapon['bonus'],
damage_bonus=weapon['bonus'], ranged=weapon['gun'], ammo=weapon['ammo'])
item = Object(x, y, weapon['char'], weapon['name'], libtcod.brass, equipment=equipment_component)
return item
def get_armor(x, y):
armor = hhtable.make_armor()
if armor['char'] == '[':
armor_slot = 'shield'
else:
armor_slot = 'armor'
equipment_component = Equipment(slot=armor_slot, armor_bonus=armor['ac'], damage_bonus=armor['str_bonus'],
to_hit_bonus=armor['dex_bonus'])
item = Object(x, y, armor['char'], armor['name'], libtcod.dark_gray, equipment=equipment_component)
return item
def get_placeable(x, y):
type = random.choice(['terminal', 'restpod', 'teleporter'])
if type == 'terminal':
terminal = Terminal()
placeable = Placeable(use_class=terminal)
obj = Object(x, y, chr(127), 'terminal', libtcod.silver, placeable=placeable)
elif type == 'restpod':
restpod = RestPod(heal_bonus=dungeon_level)
placeable = Placeable(use_class=restpod)
obj = Object(x, y, chr(239), 'rest pod', libtcod.purple, placeable=placeable)
elif type == 'teleporter':
teleport = Teleporter()
placeable = Placeable(use_class=teleport)
obj = Object(x, y, chr(23), 'teleporter', libtcod.dark_blue, placeable=placeable)
return obj
def place_objects(room):
# maximum number of monsters per room
max_monsters = from_dungeon_level([[2, 1], [3, 4], [4, 6], [5, 8]])
# monster table
# key = name
# dict entries:
# key[0]: dungeon level appearing
# key[1]: list[name, hitdice tuple, color]
monster_table = hhtable.make_monster_table(dungeon_level)
# max number of items per room
max_items = from_dungeon_level([[1, 1], [2, 4]])
# chance of each item
# functions the same as the monster chances (weighted values, availability by level)
# future revisions should break this down by type instead of individual item, resolving specific items in the
# sub entries below.
item_chances = {'item': 4,
'armor': 3,
'weapon': 3,
'placeable': 2}
# choose random number of monsters
num_monsters = libtcod.random_get_int(0, 0, max_monsters)
for i in range(num_monsters):
# choose random spot for this monster
x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1)
y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1)
# only place it if the tile is not blocked
if not is_blocked(x, y):
# pick a monster from the table, which was already built for this dungeon level
choice = random.choice(monster_table.keys())
monster = get_monster_from_hitdice(x, y, *monster_table[choice][1])
objects.append(monster)
# choose a random number of items
num_items = libtcod.random_get_int(0, 0, max_items)
for i in range(num_items):
# choose a random spot for the item
x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1)
y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1)
# only place it if the tile is not blocked
if not is_blocked(x, y):
choice = random_choice(item_chances)
if choice == 'item':
item = get_item(x, y)
elif choice == 'armor':
item = get_armor(x, y)
elif choice == 'weapon':
item = get_weapon(x, y)
elif choice == 'placeable':
item = get_placeable(x, y)
objects.append(item)
item.send_to_back() # items appear below other objects
def make_map():
global map, objects, stairs
# the list of objects with just the player
objects = [player]
# fill map with "unblocked" tiles
map = [[Tile(True)
for y in range(MAP_HEIGHT)]
for x in range(MAP_WIDTH)]
# create the rooms
rooms = []
num_rooms = 0
for r in range(MAX_ROOMS):
# random width and height
w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
# random position without leaving map
x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
# "Rect" class makes rectangles easier to work with
new_room = Rect(x, y, w, h)
# run through the other rooms and see if they intersect with this one
failed = False
for other_room in rooms:
if new_room.intersect(other_room):
failed = True
break
if not failed:
# this means there are no intersections, so the room is valid
# "paint" it to the map's tiles
create_room(new_room)
place_objects(new_room)
# center coordinates of new_room, will be useful later
(new_x, new_y) = new_room.center()
# print "room number" onto room (optional, not included in sample code)
# remove later if issues arise, but I think it looks cool and H&H-y
# room_no = Object(new_x,new_y,chr(65+num_rooms), 'room number', libtcod.white, blocks=False)
# objects.insert(0,room_no)
if num_rooms == 0:
# this is the first room, where the player starts
player.x = new_x
player.y = new_y
else:
# all rooms after the first:
# connect it to the previous room with a tunnel
# center coordinates of previous room
(prev_x, prev_y) = rooms[num_rooms - 1].center()
if libtcod.random_get_int(0, 0, 1) == 1:
# first move horizontally then vertically
create_h_tunnel(prev_x, new_x, prev_y)
create_v_tunnel(prev_y, new_y, new_x)
else:
# first move vertically then horizontally
create_v_tunnel(prev_y, new_y, prev_x)
create_h_tunnel(prev_x, new_x, new_y)
# finally, append the new room to the list
rooms.append(new_room)
num_rooms += 1
# create stairs at the center of the last room
stairs = Object(new_x, new_y, '<', 'stairs', libtcod.white, always_visible=True)
objects.append(stairs)
stairs.send_to_back() # so it draws below monsters
def next_level():
global dungeon_level
    if dungeon_level >= 13:
        message('At last, you find an escape to the surface. You crawl up the narrow passage in search of rescue.',
                libtcod.yellow)
        end_game()
        return  # the game is over; don't heal and generate another level
# advance to the next level
message('You take a moment to rest, and recover your strength.', libtcod.yellow)
player.fighter.heal(player.fighter.max_hp / 2) # heal player by 50%
message('After a rare moment of peace, you descend further into the cave.', libtcod.red)
dungeon_level += 1
make_map()
initialize_fov()
def menu(header, options, width):
if len(options) > 26:
raise ValueError('Cannot have a menu with more than 26 options.')
# calculate total height for the header (after auto wrap) and one line per option
header_height = libtcod.console_get_height_rect(con, 0, 0, width, SCREEN_HEIGHT, header)
if header == '':
header_height = 0
height = len(options) + header_height
# create an off-screen console that represents the menu's window
window = libtcod.console_new(width, height)
# print the header with auto-wrap
libtcod.console_set_default_foreground(window, libtcod.white)
libtcod.console_print_rect_ex(window, 0, 0, width, height, libtcod.BKGND_NONE, libtcod.LEFT, header)
# print all the options
y = header_height
letter_index = ord('a')
for option_text in options:
        text = '(' + chr(letter_index) + ') ' + option_text
libtcod.console_print_ex(window, 0, y, libtcod.BKGND_NONE, libtcod.LEFT, text)
y += 1
letter_index += 1
# blit the contents of window to root console
x = SCREEN_WIDTH / 2 - width / 2
y = SCREEN_HEIGHT / 2 - height / 2
libtcod.console_blit(window, 0, 0, width, height, 0, x, y, 1.0, 0.7)
# present the root console to the player and wait for keypress
libtcod.console_flush()
input_valid = False
while not input_valid:
key = libtcod.console_wait_for_keypress(True)
if key.pressed:
key = libtcod.console_wait_for_keypress(False)
if not key.pressed:
input_valid = True
if key.vk == libtcod.KEY_ENTER and key.lalt: # special case, have to check for alt+enter for fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
# convert the ASCII code to an index; if it corresponds to an option, return it
index = key.c - ord('a')
if 0 <= index < len(options):
return index
return None
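# Editor's note: the hotkey-to-index math above works because menu letters are
# assigned consecutively from 'a'; pressing 'a' gives ord('a') - ord('a') = 0
# (the first option), 'c' gives 2, and anything outside 'a'..'a'+len(options)-1
# falls through and the menu returns None.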
def msgbox(text, width=50):
menu(text, [], width) # use menu() as a sort of 'message box'
def inventory_menu(header):
# show a menu of each item in the inventory as an option
if len(inventory) == 0:
options = ['Inventory is empty.']
else:
options = []
for item in inventory:
text = item.name
# show additional information if equipped
if item.equipment and item.equipment.is_equipped:
text = text + ' (on ' + item.equipment.slot + ')'
options.append(text)
index = menu(header, options, INVENTORY_WIDTH)
# if an item was chosen, return it
if index is None or len(inventory) == 0:
return None
return inventory[index].item
def render_all():
global color_light_wall
global color_light_ground
global fov_recompute
if fov_recompute:
# recompute FOV if needed
fov_recompute = False
libtcod.map_compute_fov(fov_map, player.x, player.y, TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO)
# go through all tiles, and set their background color
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
visible = libtcod.map_is_in_fov(fov_map, x, y)
wall = map[x][y].block_sight
if not visible:
# if it's not visible right now, the player can only see it if it's explored
if map[x][y].explored:
# it's out of the player FOV
if wall:
libtcod.console_set_char_background(con, x, y, color_dark_wall, libtcod.BKGND_SET)
else:
libtcod.console_set_char_background(con, x, y, color_dark_ground, libtcod.BKGND_SET)
else:
# it's visible
if wall:
libtcod.console_set_char_background(con, x, y, color_light_wall, libtcod.BKGND_SET)
else:
libtcod.console_set_char_background(con, x, y, color_light_ground, libtcod.BKGND_SET)
map[x][y].explored = True
# draw all objects in the list
for object in objects:
if object != player:
object.draw()
player.draw()
# blit con to root console
libtcod.console_blit(con, 0, 0, MAP_WIDTH, MAP_HEIGHT, 0, 0, 0)
# prepare to render the GUI panel
libtcod.console_set_default_background(panel, libtcod.black)
libtcod.console_clear(panel)
# print the game messages, one line at a time
y = 1
for (line, color) in game_msgs:
libtcod.console_set_default_foreground(panel, color)
libtcod.console_print_ex(panel, MSG_X, y, libtcod.BKGND_NONE, libtcod.LEFT, line)
y += 1
# show the player's stats
level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
render_bar(1, 1, BAR_WIDTH, 'HP', player.fighter.hp, player.fighter.max_hp, libtcod.light_red, libtcod.darker_red)
render_bar(1, 2, BAR_WIDTH, 'XP', player.fighter.xp, level_up_xp, libtcod.dark_green, libtcod.grey)
libtcod.console_print_ex(panel, 1, 4, libtcod.BKGND_NONE, libtcod.LEFT, 'Exp. level ' + str(player.level))
libtcod.console_print_ex(panel, 1, 5, libtcod.BKGND_NONE, libtcod.LEFT, 'Cave level ' + str(dungeon_level))
libtcod.console_print_ex(panel, 1, 6, libtcod.BKGND_NONE, libtcod.LEFT, 'Kills: ' + str(player.fighter.kills))
# display names of objects under mouse
libtcod.console_set_default_foreground(panel, libtcod.light_gray)
libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT, get_names_under_mouse())
# display names of objects under player on right side of panel
libtcod.console_print_ex(panel, SCREEN_WIDTH - 2, 0, libtcod.BKGND_NONE, libtcod.RIGHT, get_names_under_player())
# blit the contents of "panel" to root console
libtcod.console_blit(panel, 0, 0, SCREEN_WIDTH, PANEL_HEIGHT, 0, 0, PANEL_Y)
def render_bar(x, y, total_width, name, value, maximum, bar_color, back_color):
# render a bar (HP, XP, etc). first calculate width of bar
bar_width = int(float(value) / maximum * total_width)
# render background first
libtcod.console_set_default_background(panel, back_color)
libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)
# now render the bar on top
libtcod.console_set_default_background(panel, bar_color)
if bar_width > 0:
libtcod.console_rect(panel, x, y, bar_width, 1, False, libtcod.BKGND_SCREEN)
# finally, some centered text with the values
libtcod.console_set_default_foreground(panel, libtcod.white)
libtcod.console_print_ex(panel, x + total_width / 2, y, libtcod.BKGND_NONE, libtcod.CENTER,
name + ': ' + str(value) + '/' + str(maximum))
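# Editor's note, worked example for render_bar(): with value=30, maximum=100
# and total_width=20, bar_width = int(30.0 / 100 * 20) = 6, so six of the
# twenty cells are drawn in bar_color and the rest keep back_color.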
def message(new_msg, color=libtcod.white):
# split the message if necessary, among multiple lines
new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)
for line in new_msg_lines:
        # if the buffer is full, remove the first line to make room for the new one.
if len(game_msgs) == MSG_HEIGHT:
del game_msgs[0]
# add the new line as a tuple, with the text and color
game_msgs.append((line, color))
def create_h_tunnel(x1, x2, y):
global map
for x in range(min(x1, x2), max(x1, x2) + 1):
map[x][y].blocked = False
map[x][y].block_sight = False
def create_v_tunnel(y1, y2, x):
global map
# vertical tunnel
for y in range(min(y1, y2), max(y1, y2) + 1):
map[x][y].blocked = False
map[x][y].block_sight = False
def is_blocked(x, y):
global map
global objects
# first test the map tile
if map[x][y].blocked:
return True
# now check for blocking objects
for object in objects:
if object.blocks and object.x == x and object.y == y:
return True
return False
def closest_monster(max_range, exclusions):
# find closest enemy, up to a max range and in player FOV
# exclusions argument MUST BE A LIST
closest_enemy = None
closest_dist = max_range + 1 # start with slightly more than max range
for obj in objects:
if obj.fighter and obj not in exclusions and libtcod.map_is_in_fov(fov_map, obj.x, obj.y):
# calculate distance between this object and the player
dist = player.distance_to(obj)
if dist < closest_dist: # it's closer so remember it
closest_enemy = obj
closest_dist = dist
return closest_enemy
def player_death(player):
# the game ended!
global game_state
message('You died!', libtcod.red)
render_all()
game_state = 'dead'
# for added effect, transform player into a corpse!
player.char = '%'
player.color = libtcod.white
new_score(player)
def monster_death(monster):
# transform it into a nasty corpse! it doesn't block, can't be
# attacked, and doesn't move
message(monster.name.title() + ' is dead! You gain ' + str(monster.fighter.xp) + ' experience points.',
libtcod.orange)
monster.char = '%'
monster.color = libtcod.dark_red
monster.blocks = False
monster.fighter = None
monster.ai = None
monster.name = 'remains of ' + monster.name
monster.send_to_back()
def get_equipped_in_slot(slot):
for obj in inventory:
if obj.equipment and obj.equipment.slot == slot and obj.equipment.is_equipped:
return obj.equipment
return None
def get_all_equipped(obj):
if obj == player:
equipped_list = []
for item in inventory:
if item.equipment and item.equipment.is_equipped:
equipped_list.append(item.equipment)
return equipped_list
else:
return [] # other objects have no equipment
def check_level_up():
# see if the player's experience is enough to level-up
level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
if player.fighter.xp >= level_up_xp:
# it is! *ding* level up
player.level += 1
player.fighter.xp -= level_up_xp
message('Your battle experience has hardened you further. You reached level ' + str(player.level) + '!',
libtcod.yellow)
render_all() # re-render console so that message plays before menu
# check player level, roll 1d10 for new HP if 6 or less, or just +3 (see H&H rulebook)
if player.level <= 6:
hit_die = hhtable.rolldice(1, 10)
else:
hit_die = 3
player.fighter.max_hp += hit_die
player.fighter.hp += hit_die
# after level six, to_hit and damage only improve on even levels.
if player.level <= 6 or player.level % 2 == 0:
player.fighter.base_to_hit += 1
player.fighter.base_damage += 1
# ############################################
# Initialization & Main Loop
# ############################################
libtcod.console_set_custom_font('terminal16x16_gs_ro.png',
libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'Hulks and Horrors', False)
libtcod.sys_set_fps(LIMIT_FPS)
panel = libtcod.console_new(SCREEN_WIDTH, PANEL_HEIGHT)
con = libtcod.console_new(MAP_WIDTH, MAP_HEIGHT)
main_menu(firstrun=True)
|
jarcane/handhRL
|
handhrl.py
|
Python
|
gpl-3.0
| 66,404
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.fingerprint.test_fingerprint_extract_position_frequence.
This module contains unit tests for abydos.fingerprint.ExtractPositionFrequency
"""
import unittest
from abydos.fingerprint import ExtractPositionFrequency
class ExtractPositionFrequencyTestCases(unittest.TestCase):
"""Test ExtractPositionFrequency functions.
abydos.fingerprint.ExtractPositionFrequency
"""
fp = ExtractPositionFrequency()
    def test_extract_position_frequency_fingerprint(self):
"""Test abydos.fingerprint.ExtractPositionFrequency."""
# Base case
self.assertEqual(self.fp.fingerprint(''), '')
# Test cases from paper
self.assertEqual(self.fp.fingerprint('Wilkinson'), 'WKON')
if __name__ == '__main__':
unittest.main()
|
chrislit/abydos
|
tests/fingerprint/test_fingerprint_extract_position_frequency.py
|
Python
|
gpl-3.0
| 1,479
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
from tempfile import NamedTemporaryFile
from typing import Optional, Any
from collections.abc import Iterable
# project
from kiwi.command import Command
from kiwi.defaults import Defaults
from kiwi.exceptions import KiwiShellVariableValueError
class Shell:
"""
**Special character handling for shell evaluated code**
"""
@staticmethod
def quote(message):
"""
Quote characters which have a special meaning for bash
but should be used as normal characters. actually I had
planned to use pipes.quote but it does not quote as I
had expected it. e.g 'name_wit_a_$' does not quote the $
so we do it on our own for the scope of kiwi
:param str message: message text
:return: quoted text
:rtype: str
"""
# \\ quoting must be first in the list
quote_characters = ['\\', '$', '"', '`', '!']
for quote in quote_characters:
message = message.replace(quote, '\\' + quote)
return message
@staticmethod
def quote_key_value_file(filename):
"""
Quote given input file which has to be of the form
key=value to be able to become sourced by the shell
:param str filename: file path name
:return: quoted text
:rtype: str
"""
temp_copy = NamedTemporaryFile()
Command.run(['cp', filename, temp_copy.name])
Shell.run_common_function('baseQuoteFile', [temp_copy.name])
with open(temp_copy.name) as quoted:
return quoted.read().splitlines()
@staticmethod
def run_common_function(name, parameters):
"""
Run a function implemented in config/functions.sh
:param str name: function name
:param list parameters: function arguments
"""
Command.run(
[
'bash', '-c',
'source ' + ''.join(
[
Defaults.get_common_functions_file(),
'; ', name, ' ', ' '.join(parameters)
]
)
]
)
@staticmethod
def format_to_variable_value(value: Optional[Any]) -> str:
"""
Format given variable value to return a string value
representation that can be sourced by shell scripts.
If the provided value is not representable as a string
(list, dict, tuple etc) an exception is raised
:param any value: a python variable
:raises KiwiShellVariableValueError: if value is an iterable
:return: string value representation
:rtype: str
"""
if value is None:
return ''
if isinstance(value, bool):
return format(value).lower()
elif isinstance(value, str):
return value
elif isinstance(value, bytes):
return format(value.decode())
elif isinstance(value, Iterable):
            # there is no sensible way to turn an iterable
            # (list, dict, ...) into a single shell string value
raise KiwiShellVariableValueError(
'Value cannot be {0}'.format(type(value))
)
return format(value)
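# Illustrative usage (editor's sketch): behaviour of the two pure helpers
# above. Only Shell.quote and Shell.format_to_variable_value are exercised;
# the other methods need a kiwi runtime (Command, functions.sh).
if __name__ == '__main__':
    print(Shell.quote('name_with_a_$'))           # -> name_with_a_\$
    print(Shell.format_to_variable_value(True))   # -> true
    print(Shell.format_to_variable_value(b'ok'))  # -> ok
    try:
        Shell.format_to_variable_value(['not', 'representable'])
    except KiwiShellVariableValueError as issue:
        print(issue)                              # -> Value cannot be <class 'list'>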
|
b1-systems/kiwi
|
kiwi/system/shell.py
|
Python
|
gpl-3.0
| 3,930
|
# -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import github.GithubObject
class IssuePullRequest(github.GithubObject.BasicGithubObject):
@property
def diff_url(self):
return self._NoneIfNotSet(self._diff_url)
@property
def html_url(self):
return self._NoneIfNotSet(self._html_url)
@property
def patch_url(self):
return self._NoneIfNotSet(self._patch_url)
def _initAttributes(self):
self._diff_url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._patch_url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "diff_url" in attributes: # pragma no branch
assert attributes["diff_url"] is None or isinstance(attributes["diff_url"], (str, unicode)), attributes["diff_url"]
self._diff_url = attributes["diff_url"]
if "html_url" in attributes: # pragma no branch
assert attributes["html_url"] is None or isinstance(attributes["html_url"], (str, unicode)), attributes["html_url"]
self._html_url = attributes["html_url"]
if "patch_url" in attributes: # pragma no branch
assert attributes["patch_url"] is None or isinstance(attributes["patch_url"], (str, unicode)), attributes["patch_url"]
self._patch_url = attributes["patch_url"]
|
andresriancho/PyGithub
|
github/IssuePullRequest.py
|
Python
|
gpl-3.0
| 2,100
|
import django_tables2 as tables
import os
from django_tables2.utils import A, AttributeDict
from django.utils.html import format_html, escape
from datetime import date
from .models import Transfer,Experiment,Frog,Permit, Notes, Document
class ExperimentTable(tables.Table):
#selection = tables.CheckBoxColumn(accessor="pk", orderable=False)
id = tables.LinkColumn('frogs:experiment_detail', text='View', args=[A('pk')], verbose_name='')
transfer_date = tables.DateColumn(verbose_name='Date Received',
accessor=A('transferid.transfer_date'), format='d-M-Y')
expt_from = tables.DateColumn(verbose_name='Expt from', format='d-M-Y')
expt_to = tables.DateColumn(verbose_name='Expt To', format='d-M-Y')
frogid = tables.Column(verbose_name='Frog ID', accessor=A('transferid.operationid.frogid.frogid'))
species = tables.Column(verbose_name = 'Species',
accessor = A('transferid.operationid.frogid.species'))
received = tables.Column(verbose_name='Received (ml)')
transferred = tables.Column(verbose_name='Transferred (ml)')
used = tables.Column(verbose_name='Used (ml)')
location = tables.Column(verbose_name='Experiment Location', accessor=A('location'))
def render_expt_disposed(self, value):
val = bool(value)
if val:
return format_html('<span style="color:green">✔</span>')
else:
return format_html('<span style="color:red">❗</span>')
class Meta:
model = Experiment
attrs = {"class": "ui-responsive table table-hover"}
fields = ['location','transfer_date','frogid','species','received','transferred','used','expt_from','expt_to','expt_disposed','id']
class DisposalTable(tables.Table):
id = tables.LinkColumn('frogs:experiment_detail', text='View', args=[A('pk')], verbose_name='')
frogid = tables.Column(verbose_name='Frog ID', accessor=A('transferid.operationid.frogid.frogid'))
qen = tables.Column(verbose_name='QEN', accessor=A('transferid.operationid.frogid.qen'))
disposal_date = tables.DateColumn(verbose_name="Disposal Date", format='d-M-Y')
location = tables.Column(verbose_name='Experiment Location', accessor=A('location'))
def render_autoclave_indicator(self, value):
val = bool(value)
if val:
return format_html('<span style="color:green">✔</span>')
else:
return format_html('<span style="color:red">❗</span>')
def render_autoclave_complete(self, value):
val = bool(value)
if val:
return format_html('<span style="color:green">✔</span>')
else:
return format_html('<span style="color:red">❗</span>')
class Meta:
model = Experiment
attrs = {"class": "ui-responsive table table-hover"}
fields = ['location','disposal_date','qen','frogid','waste_type','waste_content','waste_qty','autoclave_indicator','autoclave_complete','disposal_sentby','id']
class TransferTable(tables.Table):
id = tables.LinkColumn('frogs:transfer_detail', text='View', args=[A('pk')], verbose_name='')
frogid = tables.LinkColumn('frogs:frog_detail', accessor=A('operationid.frogid.frogid'), args=[A('operationid.frogid.pk')],verbose_name='Frog ID')
species = tables.Column(verbose_name='Species', accessor=A('operationid.frogid.species'))
qen = tables.Column(verbose_name='QEN', accessor=A('operationid.frogid.qen'))
sop = tables.Column(verbose_name='Transfer Approval', accessor=A('transferapproval.sop'))
transfer_date = tables.DateColumn(verbose_name="Transfer Date", format='d-M-Y')
class Meta:
model = Transfer
attrs = {"class": "ui-responsive table table-hover"}
fields = ['frogid','species','qen','volume','transporter','method','transfer_date','transferapproval', 'sop','id']
class FrogTable(tables.Table):
#selectfrog = tables.CheckBoxColumn(accessor='pk')
frogid = tables.LinkColumn('frogs:frog_detail', args=[A('pk')])
get_disposed = tables.Column(verbose_name="Disposed", accessor=A('get_disposed'), orderable=False)
def render_condition(self, value):
val = bool(value)
if val:
return format_html('<span style="color:red">❗</span>')
else:
return format_html('<span></span>')
def render_get_disposed(self, value):
if value == 1: #dead and disposed
return format_html('<span style="color:green">✔</span>')
elif value == 2: #dead but not disposed
return format_html('<span style="color:red">❗</span>')
else: #alive
return format_html('<span></span>')
class Meta:
model = Frog
attrs = {"class": "ui-responsive table table-hover"}
fields = ['frogid','tankid','gender','species','current_location','condition','remarks','qen','death','get_disposed']
# order_by_field = 'frogid' #cannot sort with this on??
sortable = False
#Generic filtered table
class FilteredSingleTableView(tables.SingleTableView):
filter_class = None
def get_table_data(self):
data = super(FilteredSingleTableView, self).get_table_data()
self.filter = self.filter_class(self.request.GET, queryset=data)
return self.filter.qs
def get_context_data(self, **kwargs):
context = super(FilteredSingleTableView, self).get_context_data(**kwargs)
context['filter'] = self.filter
return context
class PermitTable(tables.Table):
id = tables.LinkColumn('frogs:permit_detail', text='View', args=[A('pk')], verbose_name='' )
arrival_date = tables.DateColumn(format='d-M-Y')
def render_color(self, value):
#print("DEBUG: COlor=", value)
return format_html("<span style='display:block; background-color:%s; font-size:0.8em; padding:8px;'>%s</span>" % (value, value))
class Meta:
model = Permit
attrs = {"class": "ui-responsive table table-hover"}
fields = ['aqis','qen','color','prefix','females','males', 'arrival_date','species','supplier','country','id']
order_by_field = 'arrival_date'
sortable = True
## Used in Frog Log Report
class SummingColumn(tables.Column):
def render_footer(self, bound_column, table):
return sum(bound_column.accessor.resolve(row) for row in table.data)
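# Editor's note, worked example: SummingColumn.render_footer() resolves the
# column's accessor on every row and sums the results, so a 'frogs_deceased'
# column over rows holding 2, 0 and 3 renders a footer of 5.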
class PermitReportTable(tables.Table):
aqis = tables.LinkColumn('frogs:permit_detail', accessor=A('aqis'), args=[A('pk')], verbose_name='AQIS Permit #' )
qen = tables.Column(footer="Total Frogs:")
get_totalfrogs = SummingColumn(verbose_name="Shipped/Born")
frogs_deceased = SummingColumn(verbose_name="Deceased")
frogs_disposed = SummingColumn(verbose_name="Disposed")
get_females_remaining = SummingColumn(verbose_name="Live Female")
get_males_remaining = SummingColumn(verbose_name="Live Male")
arrival_date = tables.DateColumn(format='d-M-Y')
class Meta:
model = Permit
attrs = {"class": "ui-responsive table table-hover"}
fields = ['aqis','qen', 'arrival_date','get_totalfrogs','get_females_remaining','get_males_remaining','frogs_deceased', 'frogs_disposed']
order_by_field = 'arrival_date'
sortable = True
class OperationTable(tables.Table):
frogid = tables.LinkColumn('frogs:frog_detail', accessor=A('frogid'), args=[A('pk')],verbose_name='Frog ID')
num_operations = tables.Column(verbose_name="Num Ops", accessor=A('num_operations'), orderable=False)
last_operation = tables.DateColumn(verbose_name="Last Operation", format='d-M-Y', accessor=A('last_operation'), orderable=False)
next_operation = tables.Column(verbose_name="Next Op due", accessor=A('next_operation'), orderable=False)
def render_next_operation(self, value):
if not value:
return format_html('<span style="color:blue">Max ops</span>')
delta = value - date.today()
if delta.days <= 0:
return format_html('<span style="color:green">OK</span>')
elif delta.days < 1: #note this is not active
return format_html('<span style="color:green">%s %s ago</span>' % (abs(delta.days),("day" if abs(delta.days) == 1 else "days")))
elif delta.days == 1:
return format_html('<span style="color:red">Tomorrow</span>')
elif delta.days > 1:
return format_html('<span style="color:red">In %s days</span>' % delta.days)
def render_condition(self,value):
val = bool(value)
if val:
return format_html('<span style="color:red">❗</span>')
else:
return format_html('<span></span>')
class Meta:
model = Frog
attrs = {"class": "ui-responsive table table-hover"}
fields = ['frogid', 'num_operations', 'last_operation', 'next_operation', 'condition', 'remarks', 'tankid']
order_by_field = '-next_operation'
sortable = True
class NotesTable(tables.Table):
note_date = tables.LinkColumn('frogs:notes_detail', accessor=A('note_date'), args=[A('pk')], verbose_name='Date' )
class Meta:
model = Notes
attrs = {"class": "ui-responsive table table-hover"}
fields = ['note_date','notes_species','notes','initials']
order_by_field = '-note_date'
sortable = True
class DocumentTable(tables.Table):
id = tables.LinkColumn('frogs:documents_detail', text='View', args=[A('pk')], verbose_name='')
created = tables.DateTimeColumn(verbose_name="Uploaded", format='d-M-Y hh:mm', accessor=A('docfile'), orderable=True)
size = tables.Column(verbose_name="Size (kB)",accessor=A('docfile'), orderable=True)
#def render_docfile(self,value):
# return value.name[2:]
def render_created(self,value):
#print("DEBUG: File=", value.storage.created_time(value.name))
return value.storage.created_time(value.name)
def render_size(self,value):
return value.storage.size(value.name)/1000
class Meta:
model = Document
attrs = {"class": "ui-responsive table table-hover"}
fields = ['order','docfile','description','created','size','archive','id']
sortable = True
order_by_field = 'order'
|
QBI-Software/FrogDB
|
frogs/tables.py
|
Python
|
gpl-3.0
| 10,287
|
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Noise Module. Handles all noise associated works
Field convention is to convert the spectra into some sort of flux and then photon count
Then use sqrt of photon count as noise base?
or a defined noise floor?
or ... stuff.
What exactly is JWST Pandexo simulation doing anyway?
"""
import os
import sys
import numpy as np
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
from SEAS_Utils.common_utils.constants import *
def convolve_spectra():
pass
def simple_noise():
pass
class Noise():
def __init__(self):
pass
def get_noise(self):
pass
class Poisson_Noise(Noise):
    def __init__(self, poisson, shape):
        # store the samples; returning them from __init__ raises a TypeError
        self.noise = np.random.poisson(poisson, shape).astype(float)
    def get_noise(self):
        return self.noise
class Shot_Noise(Poisson_Noise):
"""
shot noise is most frequently observed with small currents or low light intensities that have been amplified.
SNR = sqrt(N), where N is average number of event
"""
def __init__(self):
pass
class Photon_Noise():
def __init__(self, user_input):
self.user_input = user_input
def blackbody_lam(self, wav, T):
""" Blackbody as a function of wavelength (m) and temperature (K).
returns units of erg/s/cm^2/cm/Steradian
"""
a = 2*HPlanck*CLight**2
b = HPlanck*CLight/(wav*BoltK*T)
intensity = a/((wav**5)*(np.exp(b)-1.0))
return intensity
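    # Editor's note (illustrative): blackbody_lam implements the Planck law
    #   B(lam, T) = 2*h*c**2 / (lam**5 * (exp(h*c / (lam*k*T)) - 1))
    # with h, c, k supplied by the SEAS constants HPlanck, CLight and BoltK.
    # A standalone sanity check with SI constants might look like:
    #   h, c, k = 6.626e-34, 2.998e8, 1.381e-23
    #   lam, T = 500e-9, 5778.0  # 500 nm, roughly the solar temperature
    #   B = 2*h*c**2 / (lam**5 * (np.exp(h*c / (lam*k*T)) - 1.0))
    # which, per Wien's law, sits near the peak wavelength for a Sun-like star.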
def calculate_noise(self, R_atmosphere):
R_Star = self.user_input["Star"]["R_Star"]
R_planet = self.user_input["Planet"]["R_Planet"]
        # values below 1000 are assumed to be given in solar / Earth radii
        if R_Star < 1000:
            R_Star *= R_Sun
        if R_planet < 1000:
            R_planet *= R_Earth
R_obs = self.user_input["Telescope"]["Aperture"]
#R_atmosphere = self.user_input["Planet"]["R_Atmosphere"]
Distance = self.user_input["Telescope"]["Distance"]
Duration = self.user_input["Telescope"]["Duration"]
Quantum = self.user_input["Telescope"]["Quantum_Efficiency"]
T_Star = self.user_input["Star"]["T"]
Noise_M = self.user_input["Observation_Effects"]["Noise"]["Multiplier"]
# calculate number of photons
B_Body = self.blackbody_lam(self.bin_centers*10**-6, T_Star)
Bin_width = self.bin_width*10**-6
A_Star = np.pi*R_Star**2
Psi_Tele = np.pi*R_obs**2/Distance**2
E_Total = B_Body*Bin_width*A_Star*Psi_Tele*Duration
        num_photon = (E_Total*self.bin_centers*10**-6)/(HPlanck*CLight)*Quantum  # CLight, as in blackbody_lam above
signal = (2*R_planet*R_atmosphere)/R_Star**2
photon_noise = Noise_M/np.sqrt(num_photon)
SNR = signal/photon_noise
return signal, photon_noise, SNR
def determine_bin(self):
lambda_init = self.user_input["Telescope"]["min_wavelength"]
lambda_max = self.user_input["Telescope"]["max_wavelength"]
bin_width_init = self.user_input["Observation_Effects"]["bin_width"]
bin_exponent = self.user_input["Observation_Effects"]["bin_exponent"]
bin_edges,bin_width,bin_centers = [],[],[]
i=0
lambda_current = lambda_init
bin_edges.append(lambda_current)
while True:
new_bin_width = bin_width_init*(lambda_current/lambda_init)**(bin_exponent)
lambda_center = lambda_current+0.5*new_bin_width
lambda_current += new_bin_width
if lambda_center > lambda_max:
break
bin_edges.append(lambda_current)
bin_centers.append(lambda_center)
bin_width.append(new_bin_width)
i+=1
self.bin_edges = np.array(bin_edges)
self.bin_width = np.array(bin_width)
self.bin_centers = np.array(bin_centers)
return bin_edges, bin_width, bin_centers
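# Editor's note, worked example for determine_bin(): each bin width grows as
#   new_width = bin_width_init * (lambda_current / lambda_init) ** bin_exponent
# so with lambda_init=1.0, bin_width_init=0.01 and bin_exponent=1 the bins
# start at 1.0, 1.01, 1.0201, ... (widths 0.01, 0.0101, 0.010201, ...),
# i.e. a grid of constant spectral resolution R = lambda/dlambda = 100.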
class Gaussian_Noise(Noise):
def __init__(self, multiplier=1, length=10):
self.length = length
self.multiplier = multiplier
def get_noise(self):
return np.random.randn(self.length)*self.multiplier
"""
mu, sigma = 8560, 20 # mean and standard deviation
s = np.random.normal(mu, sigma, 1000)
import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 250, normed=True)
print(count, bins)
func = 1/(sigma * np.sqrt(2 * np.pi)) *np.exp( - (bins - mu)**2 / (2 * sigma**2) )
plt.plot(bins, func, linewidth=2, color='r')
plt.show()
"""
class Uniform_Noise(Noise):
    def get_noise(self):
        # uniform sample in [0, 1); __init__ must not return a value
        return np.random.random()
class Laplace_Noise(Noise):
def __init__(self):
pass
class Lorentz_Noise(Noise):
def __init__(self):
pass
class Perlin_Noise(Noise):
"""
reference https://pypi.python.org/pypi/noise/
"""
def __init__(self):
pass
class Telescope_Noise(Noise):
def __init__(self):
pass
def add_jitter(self):
pass
|
azariven/BioSig_SEAS
|
SEAS_Main/observation_effects/noise.py
|
Python
|
gpl-3.0
| 6,126
|
import sys
import os
sys.path.append(os.path.join(sys.path[0],"..",".."))
import argparse
from KicadModTree import *
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Commandline tool for generating ring pads.')
parser.add_argument('-n', '--name', metavar='fp_name', type=str,
help='Name of the generated footprint. default: output', default='output')
parser.add_argument('--at', type=float, nargs=2, help='position of the pad, default: at origin', default=[0,0], metavar=('x', 'y'))
parser.add_argument('-v', '--verbose', action='count', help='set debug level')
parser.add_argument('-i', '--inner_diameter', type=float, help='inside diameter', required=True)
parser.add_argument('-o', '--outer_diameter', type=float, help='outside diameter', required=True)
parser.add_argument('-p', '--number', type=str, help='the pin number, default: 1', default='1')
parser.add_argument('--anchor_count', type=int, help='number of anchor (trace connection points), default: 4', default=4)
parser.add_argument('--paste_count', type=int, help='number of paste areas, default: 4', default=4)
parser.add_argument('--paste_round_radius_radio', type=float, help='round radius ratio for the paste pads', default=0.25)
parser.add_argument('--paste_clearance', type=float, help='clearance between paste areas', nargs='?')
parser.add_argument('--mask_margin', type=float, help='soldermask margin, default:0', default=0)
parser.add_argument('--paste_margin', type=float, help='solderpaste margin, default:0 (means controlled by footprint or board setup)', default=0)
args = parser.parse_args()
kicad_mod = Footprint(args.name)
kicad_mod.append(
RingPad(
number=args.number, at=args.at,
size=args.outer_diameter, inner_diameter=args.inner_diameter,
num_anchor=args.anchor_count, num_paste_zones=args.paste_count,
solder_paste_margin=args.paste_margin, solder_mask_margin=args.mask_margin,
paste_round_radius_radio=args.paste_round_radius_radio,
paste_to_paste_clearance=args.paste_clearance))
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(args.name + '.kicad_mod')
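    # Illustrative invocation (editor's note), derived from the argparse
    # options above; the output file name follows the -n argument:
    #   python RingPad.py -n ring_test -i 1.0 -o 2.0 --paste_count 4
    # would write ring_test.kicad_mod with a ring pad of inner diameter 1.0
    # and outer diameter 2.0 (units presumably mm, KiCad's convention).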
|
pointhi/kicad-footprint-generator
|
scripts/PadGenerator/RingPad.py
|
Python
|
gpl-3.0
| 2,221
|
# -*- coding: utf-8 -*-
'''
Created on 6 May 2012
'''
from events.event import Event
from missions.mission import Mission
from robots.robot import Robot
class UIMission(Mission):
def __init__(self, robot, can, ui):
        super(UIMission, self).__init__(robot, can, ui)
self.state = "repos"
def process_event(self, event):
if self.state == "repos":
if event.name == "ui":
if event.type == "calibrate":
if "calibrate_rangefinder" in self.missions:
self.missions["calibrate_rangefinder"].start(event.id)
else:
self.logger.error("The mission \'calibrate_rangefinder\' is not loaded")
elif event.type == "get":
if event.mission in self.missions:
ans = getattr(self.missions[event.mission], event.attribute)
self.ui.send("answer %s" % (ans.__str__()))
else:
self.ui.send("exception mission %s not found" % (event.mission))
elif event.type == "end":
print("Stopping the ia")
self.ui.send("stopping")
self.ui.stop()
self.can.stop()
#self.inter.stop()
                    # TODO: kill the other threads
elif event.type == "init":
Robot.side = event.side
self.ui.send("answer done")
self.send_event(Event("ui", "start", self.missions["start"]))
elif event.type == "message":
self.logger.info("UI says: %s" % event.message)
elif event.type == "positioning":
if "positioning1" in self.missions:
self.missions["positioning1"].start()
else:
self.logger.error("The mission \'positioning\' is not loaded")
elif event.type == "set":
if event.mission in self.missions:
value = event.value
                        # TODO: double-check depending on the attribute's type
try:
setattr(self.missions[event.mission], event.attribute, value)
except AttributeError as e:
self.ui.send("exception %s" % e)
else:
self.ui.send("answer done")
else:
self.ui.send("exception mission %s not found" % (event.mission))
elif event.type == "test":
if event.test == "forward":
self.missions["forward"].start(self, 1000)
self.ui.send("answer done")
|
7Robot/cerveau
|
ia/missions/common/ui.py
|
Python
|
gpl-3.0
| 3,138
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Plugins/VcsPlugins/vcsMercurial/HgNewProjectOptionsDialog.ui'
#
# Created: Tue Nov 18 17:53:57 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_HgNewProjectOptionsDialog(object):
def setupUi(self, HgNewProjectOptionsDialog):
HgNewProjectOptionsDialog.setObjectName("HgNewProjectOptionsDialog")
HgNewProjectOptionsDialog.resize(562, 221)
HgNewProjectOptionsDialog.setSizeGripEnabled(True)
self.gridLayout = QtWidgets.QGridLayout(HgNewProjectOptionsDialog)
self.gridLayout.setObjectName("gridLayout")
self.textLabel1 = QtWidgets.QLabel(HgNewProjectOptionsDialog)
self.textLabel1.setObjectName("textLabel1")
self.gridLayout.addWidget(self.textLabel1, 0, 0, 1, 1)
self.protocolCombo = QtWidgets.QComboBox(HgNewProjectOptionsDialog)
self.protocolCombo.setObjectName("protocolCombo")
self.gridLayout.addWidget(self.protocolCombo, 0, 1, 1, 2)
self.TextLabel2 = QtWidgets.QLabel(HgNewProjectOptionsDialog)
self.TextLabel2.setObjectName("TextLabel2")
self.gridLayout.addWidget(self.TextLabel2, 1, 0, 1, 1)
self.vcsUrlEdit = QtWidgets.QLineEdit(HgNewProjectOptionsDialog)
self.vcsUrlEdit.setObjectName("vcsUrlEdit")
self.gridLayout.addWidget(self.vcsUrlEdit, 1, 1, 1, 1)
self.vcsUrlButton = QtWidgets.QToolButton(HgNewProjectOptionsDialog)
self.vcsUrlButton.setObjectName("vcsUrlButton")
self.gridLayout.addWidget(self.vcsUrlButton, 1, 2, 1, 1)
self.vcsRevisionLabel = QtWidgets.QLabel(HgNewProjectOptionsDialog)
self.vcsRevisionLabel.setObjectName("vcsRevisionLabel")
self.gridLayout.addWidget(self.vcsRevisionLabel, 2, 0, 1, 1)
self.vcsRevisionEdit = QtWidgets.QLineEdit(HgNewProjectOptionsDialog)
self.vcsRevisionEdit.setWhatsThis("")
self.vcsRevisionEdit.setObjectName("vcsRevisionEdit")
self.gridLayout.addWidget(self.vcsRevisionEdit, 2, 1, 1, 2)
self.TextLabel4 = QtWidgets.QLabel(HgNewProjectOptionsDialog)
self.TextLabel4.setObjectName("TextLabel4")
self.gridLayout.addWidget(self.TextLabel4, 3, 0, 1, 1)
self.vcsProjectDirEdit = QtWidgets.QLineEdit(HgNewProjectOptionsDialog)
self.vcsProjectDirEdit.setObjectName("vcsProjectDirEdit")
self.gridLayout.addWidget(self.vcsProjectDirEdit, 3, 1, 1, 1)
self.projectDirButton = QtWidgets.QToolButton(HgNewProjectOptionsDialog)
self.projectDirButton.setObjectName("projectDirButton")
self.gridLayout.addWidget(self.projectDirButton, 3, 2, 1, 1)
self.largeCheckBox = QtWidgets.QCheckBox(HgNewProjectOptionsDialog)
self.largeCheckBox.setObjectName("largeCheckBox")
self.gridLayout.addWidget(self.largeCheckBox, 4, 0, 1, 3)
self.lfNoteLabel = QtWidgets.QLabel(HgNewProjectOptionsDialog)
self.lfNoteLabel.setWordWrap(True)
self.lfNoteLabel.setObjectName("lfNoteLabel")
self.gridLayout.addWidget(self.lfNoteLabel, 5, 0, 1, 3)
self.buttonBox = QtWidgets.QDialogButtonBox(HgNewProjectOptionsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 6, 0, 1, 3)
self.textLabel1.setBuddy(self.protocolCombo)
self.TextLabel2.setBuddy(self.vcsUrlEdit)
self.vcsRevisionLabel.setBuddy(self.vcsRevisionEdit)
self.TextLabel4.setBuddy(self.vcsProjectDirEdit)
self.retranslateUi(HgNewProjectOptionsDialog)
self.buttonBox.accepted.connect(HgNewProjectOptionsDialog.accept)
self.buttonBox.rejected.connect(HgNewProjectOptionsDialog.reject)
QtCore.QMetaObject.connectSlotsByName(HgNewProjectOptionsDialog)
HgNewProjectOptionsDialog.setTabOrder(self.protocolCombo, self.vcsUrlEdit)
HgNewProjectOptionsDialog.setTabOrder(self.vcsUrlEdit, self.vcsUrlButton)
HgNewProjectOptionsDialog.setTabOrder(self.vcsUrlButton, self.vcsRevisionEdit)
HgNewProjectOptionsDialog.setTabOrder(self.vcsRevisionEdit, self.vcsProjectDirEdit)
HgNewProjectOptionsDialog.setTabOrder(self.vcsProjectDirEdit, self.projectDirButton)
HgNewProjectOptionsDialog.setTabOrder(self.projectDirButton, self.largeCheckBox)
HgNewProjectOptionsDialog.setTabOrder(self.largeCheckBox, self.buttonBox)
def retranslateUi(self, HgNewProjectOptionsDialog):
_translate = QtCore.QCoreApplication.translate
HgNewProjectOptionsDialog.setWindowTitle(_translate("HgNewProjectOptionsDialog", "New Project from Repository"))
HgNewProjectOptionsDialog.setWhatsThis(_translate("HgNewProjectOptionsDialog", "<b>New Project from Repository Dialog</b>\n"
"<p>Enter the various repository infos into the entry fields. These values are used, when the new project is retrieved from the repository. If the checkbox is selected, the URL must end in the project name. A repository layout with project/tags, project/branches and project/trunk will be assumed. In this case, you may enter a tag or branch, which must look like tags/tagname or branches/branchname. If the checkbox is not selected, the URL must contain the complete path in the repository.</p>\n"
"<p>For remote repositories the URL must contain the hostname.</p>"))
self.textLabel1.setText(_translate("HgNewProjectOptionsDialog", "&Protocol:"))
self.protocolCombo.setToolTip(_translate("HgNewProjectOptionsDialog", "Select the protocol to access the repository"))
self.TextLabel2.setText(_translate("HgNewProjectOptionsDialog", "&URL:"))
self.vcsUrlEdit.setToolTip(_translate("HgNewProjectOptionsDialog", "Enter the url path of the repository (without protocol part)"))
self.vcsUrlButton.setToolTip(_translate("HgNewProjectOptionsDialog", "Select the repository url via a directory selection dialog"))
self.vcsRevisionLabel.setText(_translate("HgNewProjectOptionsDialog", "&Revision:"))
self.vcsRevisionEdit.setToolTip(_translate("HgNewProjectOptionsDialog", "Enter the revision the new project should be generated from"))
self.TextLabel4.setText(_translate("HgNewProjectOptionsDialog", "Project &Directory:"))
self.vcsProjectDirEdit.setToolTip(_translate("HgNewProjectOptionsDialog", "Enter the directory of the new project."))
self.vcsProjectDirEdit.setWhatsThis(_translate("HgNewProjectOptionsDialog", "<b>Project Directory</b>\n"
"<p>Enter the directory of the new project. It will be retrieved from \n"
"the repository and be placed in this directory.</p>"))
self.largeCheckBox.setText(_translate("HgNewProjectOptionsDialog", "Download all versions of all large files"))
self.lfNoteLabel.setText(_translate("HgNewProjectOptionsDialog", "<b>Note:</b> This option increases the download time and volume."))
|
davy39/eric
|
Plugins/VcsPlugins/vcsMercurial/Ui_HgNewProjectOptionsDialog.py
|
Python
|
gpl-3.0
| 7,137
|
#!/usr/bin/env python
""" MultiQC module to parse output from HISAT2 """
from __future__ import print_function
from collections import OrderedDict
import logging
import re
from multiqc import config
from multiqc.plots import bargraph
from multiqc.modules.base_module import BaseMultiqcModule
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
""" HISAT2 module, parses stderr logs. """
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(name="HISAT2", anchor="hisat2",
href='https://ccb.jhu.edu/software/hisat2/',
info="is a fast and sensitive alignment program for mapping "\
"NGS reads (both DNA and RNA) against a reference genome or "\
"population of reference genomes.")
# Find and load any HISAT2 reports
self.hisat2_data = dict()
for f in self.find_log_files('hisat2', filehandles=True):
self.parse_hisat2_logs(f)
# Filter to strip out ignored sample names
self.hisat2_data = self.ignore_samples(self.hisat2_data)
if len(self.hisat2_data) == 0:
log.debug("Could not find any reports in {}".format(config.analysis_dir))
raise UserWarning
log.info("Found {} reports".format(len(self.hisat2_data)))
# Write parsed report data to a file
self.write_data_file(self.hisat2_data, 'multiqc_hisat2')
# Basic Stats Table
# Report table is immutable, so just updating it works
self.hisat2_general_stats_table()
# Alignment Rate Plot
self.hisat2_alignment_plot()
def parse_hisat2_logs(self, f):
"""
Parse statistics generated by HISAT2 >= v2.1.0 that has been run
with the --new-summary option. Older versions or logs from runs without
that option are identical to that from bowtie2 and will be parsed
by that module.
"""
# Regexes
regexes = {
'unpaired_total': r"Total(?: unpaired)? reads: (\d+)",
'unpaired_aligned_none': r"Aligned 0 times?: (\d+) \([\d\.]+%\)",
'unpaired_aligned_one': r"Aligned 1 time: (\d+) \([\d\.]+%\)",
'unpaired_aligned_multi': r"Aligned >1 times: (\d+) \([\d\.]+%\)",
'paired_total': r"Total pairs: (\d+)",
'paired_aligned_none': r"Aligned concordantly or discordantly 0 time: (\d+) \([\d\.]+%\)",
'paired_aligned_one': r"Aligned concordantly 1 time: (\d+) \([\d\.]+%\)",
'paired_aligned_multi': r"Aligned concordantly >1 times: (\d+) \([\d\.]+%\)",
'paired_aligned_discord_one': r"Aligned discordantly 1 time: (\d+) \([\d\.]+%\)",
}
# Go through log file line by line
s_name = f['s_name']
parsed_data = {}
for l in f['f']:
# Attempt in vain to find original hisat2 command, logged by another program
hscmd = re.search(r"hisat2 .+ -[1U] ([^\s,]+)", l)
if hscmd:
s_name = self.clean_s_name(hscmd.group(1), f['root'])
log.debug("Found a HISAT2 command, updating sample name to '{}'".format(s_name))
# Run through all regexes
for k, r in regexes.items():
match = re.search(r, l)
if match:
parsed_data[k] = int(match.group(1))
# Overall alignment rate
overall = re.search(r"Overall alignment rate: ([\d\.]+)%", l)
if overall:
parsed_data['overall_alignment_rate'] = float(overall.group(1))
# Save parsed data
if s_name in self.hisat2_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name)
self.hisat2_data[s_name] = parsed_data
# Reset in case we find more in this log file
s_name = f['s_name']
parsed_data = {}
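    # Editor's note (illustrative): given a --new-summary line of the form
    #   "Total pairs: 412917"
    # the 'paired_total' regex above captures 412917 into parsed_data, and a
    # line of the form "Overall alignment rate: 98.53%" is parsed separately
    # into parsed_data['overall_alignment_rate'] = 98.53. (These sample lines
    # are shaped to match the regexes, not copied from a real run.)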
def hisat2_general_stats_table(self):
""" Take the parsed stats from the HISAT2 report and add it to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['overall_alignment_rate'] = {
'title': '% Aligned',
'description': 'overall alignment rate',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
self.general_stats_addcols(self.hisat2_data, headers)
def hisat2_alignment_plot (self):
""" Make the HighCharts HTML to plot the alignment rates """
# Config for the plot
config = {
'ylab': '# Reads',
'cpswitch_counts_label': 'Number of Reads'
}
# Split the data into SE and PE
sedata = {}
pedata = {}
for s_name, data in self.hisat2_data.items():
if 'paired_total' in data:
# Save half 'pairs' of mate counts
m_keys = ['unpaired_total', 'unpaired_aligned_none', 'unpaired_aligned_one', 'unpaired_aligned_multi']
for k in m_keys:
if k in data:
data[k] = float(data[k]) / 2.0
pedata[s_name] = data
else:
sedata[s_name] = data
# Two plots, don't mix SE with PE
if len(sedata) > 0:
sekeys = OrderedDict()
sekeys['unpaired_aligned_one'] = { 'color': '#20568f', 'name': 'SE mapped uniquely' }
sekeys['unpaired_aligned_multi'] = { 'color': '#f7a35c', 'name': 'SE multimapped' }
sekeys['unpaired_aligned_none'] = { 'color': '#981919', 'name': 'SE not aligned' }
config['id'] = 'hisat2_se_plot'
config['title'] = 'HISAT2 SE Alignment Scores'
self.add_section(
plot = bargraph.plot(sedata, sekeys, config)
)
if len(pedata) > 0:
pekeys = OrderedDict()
pekeys['paired_aligned_one'] = { 'color': '#20568f', 'name': 'PE mapped uniquely' }
pekeys['paired_aligned_discord_one'] = { 'color': '#5c94ca', 'name': 'PE mapped discordantly uniquely' }
pekeys['unpaired_aligned_one'] = { 'color': '#95ceff', 'name': 'PE one mate mapped uniquely' }
pekeys['paired_aligned_multi'] = { 'color': '#f7a35c', 'name': 'PE multimapped' }
pekeys['unpaired_aligned_multi'] = { 'color': '#ffeb75', 'name': 'PE one mate multimapped' }
pekeys['unpaired_aligned_none'] = { 'color': '#981919', 'name': 'PE neither mate aligned' }
config['id'] = 'hisat2_pe_plot'
config['title'] = 'HISAT2 PE Alignment Scores'
self.add_section(
description = '<em>Please note that single mate alignment counts are halved to tally with pair counts properly.</em>',
plot = bargraph.plot(pedata, pekeys, config)
)
|
robinandeer/MultiQC
|
multiqc/modules/hisat2/hisat2.py
|
Python
|
gpl-3.0
| 6,980
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Ben Scott on '26/01/2017'.
"""
import pytz
import datetime
def utc_timestamp():
now = datetime.datetime.utcnow()
return now.replace(tzinfo=pytz.utc)
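# Editor's note (illustrative usage): utc_timestamp() returns a timezone-aware
# datetime, so e.g. utc_timestamp().isoformat() ends in '+00:00'. On
# Python >= 3.2 the same result is available without pytz:
#   datetime.datetime.now(datetime.timezone.utc)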
|
sparkd/fuse
|
fuse/utils.py
|
Python
|
gpl-3.0
| 215
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_dst_group
short_description: Manage SPAN destination groups on Cisco ACI fabrics (span:DestGrp)
description:
- Manage SPAN destination groups on Cisco ACI fabrics.
- More information from the internal APIC class I(span:DestGrp) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
dst_group:
description:
- The name of the SPAN destination group.
required: yes
aliases: [ name ]
description:
description:
- The description of the SPAN destination group.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_tenant_span_dst_group:
hostname: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
dst_group: '{{ dst_group }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec
argument_spec.update(
dst_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dst_group', 'tenant']],
['state', 'present', ['dst_group', 'tenant']],
],
)
dst_group = module.params['dst_group']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='spanDestGrp',
aci_rn='destgrp-{0}'.format(dst_group),
filter_target='eq(spanDestGrp.name, "{0}")'.format(dst_group),
module_object=dst_group,
),
)
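    # Editor's note: given the rn chain above, the request is presumably
    # addressed to a DN of the form uni/tn-<tenant>/destgrp-<dst_group>
    # (the exact REST URL is assembled by ACIModule.construct_url).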
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='spanDestGrp',
class_config=dict(
name=dst_group,
descr=description,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='spanDestGrp')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
|
haad/ansible
|
lib/ansible/modules/network/aci/aci_tenant_span_dst_group.py
|
Python
|
gpl-3.0
| 4,099
|
from flask import Flask, render_template, request
from flask_socketio import SocketIO, send, emit, join_room, leave_room
from threading import Thread
import serial
import time
import json
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
arduino_conn = serial.Serial("/dev/ttyUSB0", 9600, timeout=1)
arduino_conn.flushInput()
arduino_conn.flushOutput()
cook = False
@app.route('/')
def hello_world():
print('root called')
return render_template('index.html')
@app.route('/cook/', methods=['GET'])
def cook_egg():
    # renamed from cook() so the view does not shadow the global `cook` flag
    egg_type = request.args.get('type')
    print("TYPE: %s" % json.dumps(egg_type))
    print("Cook Egg Before")
    global cook
    cook = True
    arduino_conn.write("1")
    print("Cook Egg After")
    return json.dumps({"message": "Cooking an Egg"})
def serial_comm():
global cook
while True:
if cook:
print "Writing to pi"
arduino_conn.write("1")
cook = False
else:
time.sleep(2)
# t1 = Thread(target=main_routine)
#t2 = Thread(target=serial_comm)
# t1.start()
#t2.start()
#
if not arduino_conn.is_open:
arduino_conn.open()
socketio.run(app, host='0.0.0.0')
|
robinonsay/Eggsy
|
app.py
|
Python
|
gpl-3.0
| 1,188
|
teste = input('type something: ')
print(teste)
|
magaum/python_dos_irmao
|
imprimir_dados_digitados_no_shell.py
|
Python
|
gpl-3.0
| 46
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Copyright © 2016 by Michael Keil
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import epuck
import eCommunication
class selector:
'''
Selector ePuck
'''
def __init__(self, bot):
"""
Constructor
:parameter: bot
:pType: epuck.robot
"""
if isinstance(bot, epuck.robot):
self._robot = bot
self._communication = eCommunication.communication(self._robot)
else:
logging.error('Parameter has to be of type robot')
raise ValueError("Parameter has to be of type robot")
def getValues(self):
"""
Getter selector values
:return: selector values
:pType: string
"""
if not self._robot.getConnectionStatus():
logging.exception('No connection available')
            raise Exception('No connection available')
response = self._communication.send_receive('c').split(",")
t = response[0]
        response = tuple(response[1:])
if t == "c":
# Selector
logging.debug(response)
return response[0]
else:
logging.warn('WARNING: Wrong return value')
return False
|
RL-LDV-TUM/RobotAPI
|
robot/eSelector.py
|
Python
|
gpl-3.0
| 2,017
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'OEILocation.gpgin'
db.delete_column(u'spoc_oeilocation', 'gpgin')
def backwards(self, orm):
# Adding field 'OEILocation.gpgin'
db.add_column(u'spoc_oeilocation', 'gpgin',
self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
keep_default=False)
models = {
u'spoc.fews_oei_gemalen': {
'GPGIN': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGINWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGINZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGUIT': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGUITWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGUITZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'HYPERLINK': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'ID_INT': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}),
'KGMAANDR': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMAAPOM': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMAFSL1': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMAFSL2': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMBYPAS': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMFUNPA': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMIDENT': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'KGMINLAT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMJAAR': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMKEREN': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMLOZBU': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMNAAM': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KGMNAPD': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'KGMOMSCH': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KGMPASBR': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMSOORT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KGMSTATU': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'MEMO': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'METBRON': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'METINWDAT': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'METINWWYZ': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'METOPMERK': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'Meta': {'object_name': 'FEWS_OEI_GEMALEN', 'db_table': "u'FEWS_OEI_GEMALEN'", 'managed': 'False'},
'OBJDERDE': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'OPMERKING': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'RICHTING': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'X': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'Y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'spoc.fews_oei_meetpunten': {
'GPG': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ID_INT': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}),
'METBRON': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'METINWDAT': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'METINWWYZ': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'METOPMERK': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'MPNDATEI': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'MPNDATIN': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'MPNDEBMT': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'MPNIDENT': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'MPNNAAM': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'MPNSOORT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'MPNSTATU': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'MPNSYS': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'MPN_ID': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'Meta': {'object_name': 'FEWS_OEI_MEETPUNTEN', 'db_table': "u'FEWS_OEI_MEETPUNTEN'", 'managed': 'False'},
'X': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'Y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'spoc.fews_oei_sluizen': {
'GPGIN': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGINWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGINZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGUIT': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGUITWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGUITZP': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'HYPERLINK': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'ID_INT': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}),
'KSLBOKBE': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSLBOKBO': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSLFUNPA': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSLIDENT': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'KSLINLAT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSLKOLBR': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSLKOLHG': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSLLENGT': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSLNAAM': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KSLOMSCH': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KSLPASBR': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSLSOORT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSLSTATU': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'MEMO': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'METBRON': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'METINWDAT': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'METINWWYZ': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'METOPMERK': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'Meta': {'object_name': 'FEWS_OEI_SLUIZEN', 'db_table': "u'FEWS_OEI_SLUIZEN'", 'managed': 'False'},
'OBJDERDE': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'OPMERKING': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'RICHTING': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'X': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'Y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'spoc.fews_oei_stuwen': {
'GPGBES': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGBESWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGBESZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGBOS': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'GPGBOSWP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'GPGBOSZP': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'HYPERLINK': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'ID_INT': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'primary_key': 'True'}),
'KSTAANT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTBREED': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSTDSBRE': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSTFUNCT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTHOOGT': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSTIDENT': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'KSTINLAT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTJAAR': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'KSTKRVRM': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTMAXKH': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSTMINKH': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'KSTNAAM': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KSTNAPD': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'KSTNAPH': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'KSTOMSCH': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'KSTPASBR': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTREGEL': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTSOORT': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'KSTSTATU': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'MEMO': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'METBRON': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'METINWDAT': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'METINWWYZ': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'METOPMERK': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'Meta': {'object_name': 'FEWS_OEI_STUWEN', 'db_table': "u'FEWS_OEI_STUWEN'", 'managed': 'False'},
'OBJDERDE': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'OPMERKING': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'RICHTING': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'X': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'Y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'spoc.field': {
'Meta': {'unique_together': "((u'field_type', u'prefix'),)", 'object_name': 'Field'},
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
u'spoc.header': {
'Meta': {'ordering': "[u'location__locationid']", 'unique_together': "((u'location', u'parameter'),)", 'object_name': 'Header'},
'begintime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'headers'", 'to': u"orm['spoc.ScadaLocation']"}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Parameter']", 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'spoc.headerformula': {
'Meta': {'object_name': 'HeaderFormula'},
'coef1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef5': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef6': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef7': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'coef8': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'dstart': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dstop': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'formula_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'header': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Header']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'spoc.location': {
'Meta': {'ordering': "[u'created']", 'object_name': 'Location'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fews': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forward': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oei_location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.OEILocation']", 'null': 'True', 'blank': 'True'}),
'scada_location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.ScadaLocation']", 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'spoc.locationsort': {
'Meta': {'ordering': "[u'sort']", 'object_name': 'LocationSort'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'spoc.oeilocation': {
'Meta': {'ordering': "[u'locationname']", 'object_name': 'OEILocation'},
'datumbg': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'debitf': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gpginwp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'gpginzp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'gpguit': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'gpguitwp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'gpguitzp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'}),
'inlaatf': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'locationid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'primary_key': 'True'}),
'locationname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'objectid': ('django.db.models.fields.IntegerField', [], {}),
'regelbg': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.LocationSort']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'y': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'spoc.parameter': {
'Meta': {'ordering': "[u'id']", 'object_name': 'Parameter'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'spoc.scadalocation': {
'Meta': {'ordering': "[u'locationname']", 'object_name': 'ScadaLocation'},
'locationid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'primary_key': 'True'}),
'locationname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Source']", 'null': 'True', 'blank': 'True'})
},
u'spoc.source': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Source'},
'directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'source_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'spoc.validation': {
'Meta': {'object_name': 'Validation'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.ValidationField']"}),
'header': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Header']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '4', 'blank': 'True'})
},
u'spoc.validationfield': {
'Meta': {'unique_together': "((u'field', u'parameter'),)", 'object_name': 'ValidationField'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spoc.Parameter']"})
}
}
complete_apps = ['spoc']
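# Hedged note (not part of the generated migration): with South, this migration
# would typically be applied or reverted via manage.py, e.g.:
#
#   python manage.py migrate spoc         # forwards to the latest migration
#   python manage.py migrate spoc 0018    # backwards, assuming 0018 precedes this one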
|
nens/spoc
|
spoc/migrations/0019_auto__del_field_oeilocation_gpgin.py
|
Python
|
gpl-3.0
| 24,485
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 15:35:39 2013
@author: Alison Kirkby
plots edi files (res/phase vs period) for all edis in a directory and saves out as png files
"""
import os
import mtpy.imaging.plotresponse as mtpr
from tests import EDI_DATA_DIR, EDI_DATA_DIR2
from tests.imaging import ImageTestCase, plt_wait
class Test_PlotResponse(ImageTestCase):
def test_edi_files(self):
# path to edis
epath = EDI_DATA_DIR
elst = [os.path.join(epath, edi) for edi in os.listdir(epath) if (edi.endswith('.edi'))]
for efile in elst[:3]:
# eo = mtedi.Edi(efile)
pr = mtpr.PlotResponse(fn=efile,
plot_num=2,
plot_tipper='yri',
plot_pt='y')
plt_wait(1)
figfile = os.path.join(self._temp_dir, os.path.basename(efile)[:-4] + '.png')
pr.save_plot(figfile)
assert (os.path.exists(figfile))
def test_edi_files2(self):
# path to edis
epath = EDI_DATA_DIR2
        elst = [os.path.join(epath, edi) for edi in os.listdir(epath) if (edi.endswith('.edi'))]
for efile in elst[-1:]:
# eo = mtedi.Edi(efile)
pr = mtpr.PlotResponse(fn=efile,
plot_num=2,
plot_tipper='yri',
plot_pt='y')
plt_wait(1)
figfile = os.path.join(self._temp_dir, os.path.basename(efile)[:-4] + '.png')
pr.save_plot(figfile)
assert (os.path.exists(figfile))
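# Hedged note (not part of the original test module): assuming ImageTestCase
# derives from unittest.TestCase, these tests can be run directly, e.g.:
#
#   pytest tests/imaging/test_plotResponse.py
#   python -m unittest tests.imaging.test_plotResponse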
|
MTgeophysics/mtpy
|
tests/imaging/test_plotResponse.py
|
Python
|
gpl-3.0
| 1,651
|
# -*- coding: utf-8 -*-
"""
apt-related tasks
"""
import os
import time
import codecs
from nuka.tasks import http
from nuka.task import Task
import logging as log
GPG_HEADER = b'-----BEGIN PGP PUBLIC KEY BLOCK-----'
def apt_watcher(delay, fd):
"""watcher for apt using APT::Status-Fd"""
def watcher(task, process):
start = time.time()
inc = delay
new_line = last_sent = None
while True:
if task.is_alive(process):
value = time.time() - start
if value > inc:
line = fd.readline()
while line:
if line.startswith(('dlstatus:', 'pmstatus:')):
line = line.strip()
new_line = line
line = fd.readline()
if new_line != last_sent:
last_sent = new_line
inc += delay
task.send_progress(new_line.split(':', 3)[-1])
yield
else:
# process is dead
yield
return watcher
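# Hedged illustration (not part of the original module): with -oAPT::Status-Fd=1,
# apt emits machine-readable progress lines roughly of the form below, which the
# watcher filters and forwards; split(':', 3)[-1] keeps only the description:
#
#   dlstatus:1:42.5:Retrieving file 1 of 3
#   pmstatus:nginx:80.0:Unpacking nginx (amd64)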
class source(Task):
"""add an apt source"""
def __init__(self, name=None, src=None, key=None, update=True, **kwargs):
super(source, self).__init__(name=name, src=src, key=key,
update=update, **kwargs)
def add_key(self, key):
if isinstance(key, str):
if key.startswith('http'):
res = http.fetch(src=key).do()
dst = res['dst']
else:
dst = key
with open(dst, 'rb') as fd:
data = fd.read()
if key.endswith('.gpg'):
name = os.path.basename(key)
else:
name = self.args['name']
fname = '/etc/apt/trusted.gpg.d/{0}.gpg'.format(name)
if GPG_HEADER in data:
self.sh('gpg --dearmor > {0}'.format(fname),
shell=True, stdin=data)
else:
with open(fname, 'wb') as fd:
fd.write(data)
elif isinstance(key, tuple):
keyserver, keyid = key
self.sh([
'apt-key', 'adv',
'--keyserver', keyserver,
'--recv-keys', keyid])
def do(self):
name = self.args['name']
src = self.args['src'].strip()
src += '\n'
dst = os.path.join('/etc/apt/sources.list.d', name + '.list')
changed = True
if os.path.isfile(dst):
with codecs.open(dst, 'r', 'utf8') as fd:
if fd.read() == src:
changed = False
if changed:
            key = self.args['key']
            if key is not None:
                # coerce non-tuple keys to str only when a key was given;
                # otherwise str(None) would turn into the bogus path 'None'
                if not isinstance(key, tuple):
                    key = str(key)
                self.add_key(key)
with codecs.open(dst, 'w', 'utf8') as fd:
fd.write(src)
if self.args['update']:
cmd = [
'apt-get', 'update',
'-oDir::Etc::sourcelist=' + dst,
'-oDir::Etc::sourceparts=-',
'-oAPT::Get::List-Cleanup=0'
]
self.sh(cmd)
return dict(rc=0, changed=changed)
def diff(self):
name = self.args['name']
src = self.args['src'].strip()
src += '\n'
dst = os.path.join('/etc/apt/sources.list.d', name + '.list')
if os.path.isfile(dst):
with codecs.open(dst, 'r', 'utf8') as fd:
old_data = fd.read()
else:
old_data = ''
if old_data != src:
diff = self.texts_diff(old_data, src, fromfile=dst)
else:
diff = u''
return dict(rc=0, diff=diff)
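# Hedged usage sketch (the repository values below are hypothetical, not from
# the original module): declaring an apt source with a remote armored key:
#
#   source(name='docker',
#          src='deb https://download.docker.com/linux/debian stretch stable',
#          key='https://download.docker.com/linux/debian/gpg')
#
# or with a (keyserver, keyid) tuple, which goes through apt-key adv instead:
#
#   source(name='example', src='deb http://repo.example.org/ stable main',
#          key=('keyserver.ubuntu.com', '0xDEADBEEF'))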
class update(Task):
"""apt get update"""
timestamp_file = '/root/.last-apt-get-update'
def __init__(self, cache=None, **kwargs):
kwargs.setdefault('name', '')
kwargs.update(cache=cache)
super(update, self).__init__(**kwargs)
def do(self):
cache = self.args['cache']
timestamp_file = self.args.get('timestamp_file', self.timestamp_file)
if cache:
try:
mtime = os.path.getmtime(timestamp_file)
except OSError:
need_update = True
else:
need_update = time.time() > mtime + cache
else:
need_update = True
if need_update:
kwargs = {}
args = ['apt-get', '--force-yes', '-y', '--fix-missing']
watch = self.args.get('watch')
if watch:
r, w = os.pipe2(os.O_NONBLOCK)
kwargs['stdout'] = os.fdopen(w)
kwargs['watcher'] = apt_watcher(watch, os.fdopen(r))
kwargs['short_args'] = ['apt-get', 'update']
args.extend(['-oAPT::Status-Fd=1', 'update'])
res = self.sh(args, **kwargs)
res['stdout'] = ''
else:
res = self.sh(args + ['update'], **kwargs)
if cache:
with codecs.open(timestamp_file, 'w', 'utf8') as fd:
fd.write(str(time.time()))
res['changed'] = True
else:
res = dict(rc=0, changed=False)
return res
class list(Task):
ignore_errors = True
def __init__(self, update_cache=None, **kwargs):
kwargs.update(update_cache=update_cache)
super(list, self).__init__(**kwargs)
def do(self):
update_cache = self.args.get('update_cache')
if update_cache is not None:
res = update(cache=update_cache).do()
res = self.sh(['apt-get', 'upgrade', '-qq', '-s'], check=False)
return res
class search(Task):
def __init__(self, packages, **kwargs):
kwargs.setdefault('name', ', '.join(packages or []))
kwargs.update(packages=packages)
super(search, self).__init__(**kwargs)
def do(self):
query = self.sh(['dpkg-query',
'-f', "'${Package}#${Status}#${Version}~\n'",
'-W'] + self.args['packages'],
check=False)
if query['rc'] == 1:
# not an error for dpkg-query
query['rc'] = 0
return query
class upgrade(Task):
ignore_errors = True
def __init__(self, packages=None, debconf=None,
debian_frontend='noninteractive', **kwargs):
kwargs.setdefault('name', ', '.join(packages or []))
kwargs.update(packages=packages, debconf=debconf,
debian_frontend=debian_frontend)
super(upgrade, self).__init__(**kwargs)
def do(self):
        # run through a shell so the glob actually expands
        self.sh('rm /var/lib/apt/lists/partial/*', shell=True, check=False)
env = {}
for k in ('debian_priority', 'debian_frontend'):
v = self.args.get(k)
if v:
env[k.upper()] = v
kwargs = {'env': env}
        # no specific packages: upgrade everything
if not self.args['packages']:
res = self.sh([
'apt-get', '-qq', '-y',
'-oDpkg::Options::=--force-confdef',
'-oDpkg::Options::=--force-confold', 'upgrade'
], check=False, **kwargs)
return res
else:
to_upgrade = []
miss_packages = []
            # check for each package whether it is indeed installed
for package in self.args['packages']:
is_present = self.sh(['dpkg-query',
'-f', '\'${Status}\'',
'-W', package],
check=False)
                log.warning(is_present['stdout'])
if is_present['rc'] or \
" installed" not in is_present['stdout']:
                    # skip packages that are not installed; they can't be upgraded
miss_packages.append(package)
continue
to_upgrade.append(package)
if to_upgrade:
cmd = ['apt-get', '-qq', '-y',
'-oDpkg::Options::=--force-confdef',
'-oDpkg::Options::=--force-confold',
'--only-upgrade', 'install'
] + to_upgrade
res = self.sh(cmd, check=False, **kwargs)
else:
res = dict(rc=0, stdout='no upgrade')
res['changed'] = False
res['miss_packages'] = miss_packages
res['packages'] = to_upgrade
return res
class debconf_set_selections(Task):
"""debconf-set-selections"""
diff = False
def __init__(self, selections=None, **kwargs):
super(debconf_set_selections, self).__init__(
selections=selections, **kwargs)
def do(self):
selections = []
for selection in self.args['selections']:
selections.append(' '.join(selection))
res = self.sh('debconf-set-selections',
stdin='\n'.join(selections), check=True)
return res
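# Hedged usage sketch (not part of the original module): each selection tuple is
# joined with spaces into debconf's "package question type value" line format:
#
#   debconf_set_selections(selections=[
#       ('mysql-server', 'mysql-server/root_password', 'password', 's3cret'),
#       ('mysql-server', 'mysql-server/root_password_again', 'password', 's3cret'),
#   ])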
class install(Task):
"""apt get install"""
debconf = {
'mysql-server': (
['mysql-server/root_password', 'password'],
['mysql-server/root_password_again', 'password'],
),
}
def __init__(self, packages=None, debconf=None,
debian_frontend='noninteractive', debian_priority=None,
update_cache=None, install_recommends=False, **kwargs):
kwargs.setdefault('name', ', '.join(packages or []))
kwargs.update(packages=packages, debconf=debconf,
debian_priority=debian_priority,
debian_frontend=debian_frontend,
update_cache=update_cache,
install_recommends=install_recommends,
)
super(install, self).__init__(**kwargs)
def get_packages_list(self, packages):
splited = dict([(p.split('/', 1)[0], p) for p in packages])
cmd = ['apt-cache', 'policy'] + [k for k in splited.keys()]
res = self.sh(cmd, check=False)
package = source = None
packages = {}
for line in res['stdout'].split('\n'):
sline = line.strip()
if not line.startswith(' '):
if package:
packages[package['name']] = package
package = {'name': line.strip()[:-1]}
source = None
elif sline.startswith(('Installed:', 'Candidate:')):
key, value = sline.split(':', 1)
value = value.strip()
if value.lower() == '(none)':
value = False
package[key.lower()] = value
elif sline.startswith('***'):
source = sline.split()[-1] + ' '
if source.startswith('0'):
package['source'] = splited[package['name']]
source = None
elif source and sline.startswith(source):
package['source'] = sline
source = None
installed = []
for name, fullname in splited.items():
package = packages.get(name, {})
if name in packages:
if package.get('installed'):
if '/' in fullname:
name, source = fullname.split('/', 1)
if source in package.get('source', ''):
installed.append(fullname)
else:
installed.append(fullname)
return installed
def do(self):
"""install packages"""
packages = self.args['packages']
debconf = self.args['debconf']
if not packages:
return dict(rc=1, stderr='no packages provided')
installed = self.get_packages_list(packages)
to_install = [p for p in packages if p not in installed]
if to_install:
watch = self.args.get('watch')
update_cache = self.args.get('update_cache')
if update_cache is not None:
update(cache=update_cache, watch=watch).do()
if debconf:
for p in to_install:
conf = debconf.get(p, [])
for i, c in enumerate(self.debconf.get(p, [])):
if isinstance(conf, list):
v = conf[i]
else:
v = conf
stdin = ' '.join([p] + c + [v])
self.sh(['debconf-set-selections'], stdin=stdin)
env = {}
for k in ('debian_priority', 'debian_frontend'):
v = self.args.get(k)
if v:
env[k.upper()] = v
kwargs = {'env': env}
args = ['apt-get', 'install', '-qqy',
'-oDpkg::Options::=--force-confold']
if not self.args.get('install_recommends'):
args.append('--no-install-recommends')
if watch:
r, w = os.pipe2(os.O_NONBLOCK)
kwargs['stdout'] = os.fdopen(w)
kwargs['watcher'] = apt_watcher(watch, os.fdopen(r))
kwargs['short_args'] = ['apt-get', 'install']
args.extend(['-oAPT::Status-Fd=1'] + packages)
res = self.sh(args, **kwargs)
res['stdout'] = ''
else:
res = self.sh(args + packages, **kwargs)
else:
res = dict(rc=0)
res['changed'] = to_install
return res
def diff(self):
packages = self.args['packages']
installed = self.get_packages_list(packages)
to_install = [p for p in packages if p not in installed]
installed = [p + '\n' for p in sorted(set(installed))]
packages = [p + '\n' for p in sorted(set(packages))]
diff = self.lists_diff(installed, packages)
return dict(rc=0, diff=diff, changed=to_install)
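# Hedged usage sketch (hypothetical package names; assumes the base Task passes
# extra keyword arguments such as watch through to self.args):
#
#   install(packages=['nginx', 'git'], update_cache=3600, watch=5)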
|
bearstech/nuka
|
nuka/tasks/apt.py
|
Python
|
gpl-3.0
| 14,188
|
import gzip
import urllib2
import zlib
import pytest
import simplejson as json
from django.conf import settings
from django.utils.six import BytesIO
from ..sampledata import SampleData
@pytest.fixture
def jobs_with_local_log(initial_data):
log = ("mozilla-inbound_ubuntu64_vm-debug_test-"
"mochitest-other-bm53-tests1-linux-build122")
sample_data = SampleData()
url = "file://{0}".format(
sample_data.get_log_path("{0}.txt.gz".format(log)))
job = sample_data.job_data[0]
# substitute the log url with a local url
job['job']['log_references'][0]['url'] = url
return [job]
@pytest.fixture
def jobs_with_local_mozlog_log(initial_data):
log = ("plain-chunked_raw.log")
sample_data = SampleData()
url = "file://{0}".format(
sample_data.get_log_path("{0}.gz".format(log)))
# sample url to test with a real log, during development
# url = "http://mozilla-releng-blobs.s3.amazonaws.com/blobs/try/sha512/6a690d565effa5a485a9385cc62eccd59feaa93fa6bb167073f012a105dc33aeaa02233daf081426b5363cd9affd007e42aea2265f47ddbc334a4493de1879b5"
job = sample_data.job_data[0]
# substitute the log url with a local url
job['job']['log_references'][0]['url'] = url
job['job']['log_references'][0]['name'] = 'mozlog_json'
return [job]
@pytest.fixture
def mock_mozlog_get_log_handler(monkeypatch):
def _get_log_handle(mockself, url):
response = urllib2.urlopen(
url,
timeout=settings.REQUESTS_TIMEOUT
)
return gzip.GzipFile(fileobj=BytesIO(response.read()))
import treeherder.etl.common
monkeypatch.setattr(treeherder.log_parser.artifactbuilders.MozlogArtifactBuilder,
'get_log_handle', _get_log_handle)
def test_parse_log(jm, initial_data, jobs_with_local_log, sample_resultset,
test_repository, mock_post_json, mock_fetch_json):
"""
check that at least 3 job_artifacts get inserted when running
a parse_log task for a successful job
"""
jm.store_result_set_data(sample_resultset)
jobs = jobs_with_local_log
for job in jobs:
# make this a successful job, to check it's still parsed for errors
job['job']['result'] = "success"
job['revision_hash'] = sample_resultset[0]['revision_hash']
jm.store_job_data(jobs)
job_id = jm.get_dhub().execute(
proc="jobs_test.selects.row_by_guid",
placeholders=[jobs[0]['job']['job_guid']]
)[0]['id']
job_artifacts = jm.get_dhub().execute(
proc="jobs_test.selects.job_artifact",
placeholders=[job_id]
)
jm.disconnect()
# we must have at least 3 artifacts:
# 1 for the log viewer
# 1 for the job artifact panel
# 1 for the bug suggestions
assert len(job_artifacts) >= 3
# json-log parsing is disabled due to bug 1152681.
@pytest.mark.xfail
def test_parse_mozlog_log(jm, initial_data, jobs_with_local_mozlog_log,
sample_resultset, test_repository, mock_post_json,
mock_fetch_json,
mock_mozlog_get_log_handler
):
"""
check parsing the structured log creates a ``structured-faults`` artifact
"""
jm.store_result_set_data(sample_resultset)
jobs = jobs_with_local_mozlog_log
for job in jobs:
job['job']['result'] = "testfailed"
job['revision_hash'] = sample_resultset[0]['revision_hash']
jm.store_job_data(jobs)
job_id = jm.get_dhub().execute(
proc="jobs_test.selects.row_by_guid",
placeholders=[jobs[0]['job']['job_guid']]
)[0]['id']
job_artifacts = jm.get_dhub().execute(
proc="jobs_test.selects.job_artifact",
placeholders=[job_id]
)
jm.disconnect()
artifact = [x for x in job_artifacts if x['name'] == 'json_log_summary']
assert len(artifact) >= 1
all_errors = json.loads(zlib.decompress(artifact[0]['blob']))['all_errors']
warnings = [x for x in all_errors if
x['action'] == 'log' and x['level'] == "WARNING"]
fails = [x for x in all_errors if
x['action'] == 'test_status' and x['status'] == "FAIL"]
assert len(warnings) == 106
assert len(fails) == 3
def test_bug_suggestions_artifact(jm, initial_data, jobs_with_local_log,
sample_resultset, test_repository, mock_post_json,
mock_fetch_json
):
"""
check that at least 3 job_artifacts get inserted when running
a parse_log task for a failed job, and that the number of
bug search terms/suggestions matches the number of error lines.
"""
jm.store_result_set_data(sample_resultset)
jobs = jobs_with_local_log
for job in jobs:
job['job']['result'] = "testfailed"
job['revision_hash'] = sample_resultset[0]['revision_hash']
jm.store_job_data(jobs)
job_id = jm.get_dhub().execute(
proc="jobs_test.selects.row_by_guid",
placeholders=[jobs[0]['job']['job_guid']]
)[0]['id']
job_artifacts = jm.get_dhub().execute(
proc="jobs_test.selects.job_artifact",
placeholders=[job_id]
)
jm.disconnect()
# we must have at least 3 artifacts:
# 1 for the log viewer
# 1 for the job artifact panel
# 1 for the bug suggestions
assert len(job_artifacts) >= 3
structured_log_artifact = [artifact for artifact in job_artifacts
if artifact["name"] == "text_log_summary"][0]
bug_suggestions_artifact = [artifact for artifact in job_artifacts
if artifact["name"] == "Bug suggestions"][0]
structured_log = json.loads(zlib.decompress(structured_log_artifact["blob"]))
all_errors = structured_log["step_data"]["all_errors"]
bug_suggestions = json.loads(zlib.decompress(bug_suggestions_artifact["blob"]))
# we must have one bugs item per error in bug_suggestions.
# errors with no bug suggestions will just have an empty
# bugs list
assert len(all_errors) == len(bug_suggestions)
# We really need to add some tests that check the values of each entry
# in bug_suggestions, but for now this is better than nothing.
expected_keys = set(["search", "search_terms", "bugs"])
for failure_line in bug_suggestions:
assert set(failure_line.keys()) == expected_keys
|
avih/treeherder
|
tests/log_parser/test_tasks.py
|
Python
|
mpl-2.0
| 6,463
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import copy
from datetime import datetime
from functools import wraps
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.base.errors import MakefileErrorList
from mozharness.base.script import BaseScript
from mozharness.base.transfer import TransferMixin
from mozharness.base.vcs.vcsbase import VCSMixin
from mozharness.mozilla.blob_upload import BlobUploadMixin, blobupload_config_options
from mozharness.mozilla.buildbot import BuildbotMixin
from mozharness.mozilla.building.hazards import HazardError, HazardAnalysis
from mozharness.mozilla.purge import PurgeMixin
from mozharness.mozilla.mock import MockMixin
from mozharness.mozilla.tooltool import TooltoolMixin
SUCCESS, WARNINGS, FAILURE, EXCEPTION, RETRY = xrange(5)
def requires(*queries):
"""Wrapper for detecting problems where some bit of information
required by the wrapped step is unavailable. Use it put prepending
@requires("foo"), which will check whether self.query_foo() returns
something useful."""
def make_wrapper(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
for query in queries:
val = query(self)
goodval = not (val is None or "None" in str(val))
assert goodval, f.__name__ + " requires " + query.__name__ + " to return a value"
return f(self, *args, **kwargs)
return wrapper
return make_wrapper
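# Hedged usage sketch (not part of the original script): the decorator receives
# the query methods themselves and calls each one as query(self), e.g.:
#
#   @requires(query_repo)
#   def some_step(self):
#       ...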
nuisance_env_vars = ['TERMCAP', 'LS_COLORS', 'PWD', '_']
class SpidermonkeyBuild(MockMixin,
PurgeMixin, BaseScript,
VCSMixin, BuildbotMixin, TooltoolMixin, TransferMixin, BlobUploadMixin):
config_options = [
[["--repo"], {
"dest": "repo",
"help": "which gecko repo to get spidermonkey from",
}],
[["--source"], {
"dest": "source",
"help": "directory containing gecko source tree (instead of --repo)",
}],
[["--revision"], {
"dest": "revision",
}],
[["--branch"], {
"dest": "branch",
}],
[["--vcs-share-base"], {
"dest": "vcs_share_base",
"help": "base directory for shared repositories",
}],
[["-j"], {
"dest": "concurrency",
"type": int,
"default": 4,
"help": "number of simultaneous jobs used while building the shell " +
"(currently ignored for the analyzed build",
}] + copy.deepcopy(blobupload_config_options)
]
def __init__(self):
super(SpidermonkeyBuild, self).__init__(
config_options=self.config_options,
# other stuff
all_actions=[
'purge',
'checkout-tools',
# First, build an optimized JS shell for running the analysis
'checkout-source',
'get-blobs',
'clobber-shell',
'configure-shell',
'build-shell',
# Next, build a tree with the analysis plugin active. Note that
# we are using the same checkout for the JS shell build and the
# build of the source to be analyzed, which is a little
# unnecessary (no need to rebuild the JS shell all the time).
# (Different objdir, though.)
'clobber-analysis',
'setup-analysis',
'run-analysis',
'collect-analysis-output',
'upload-analysis',
'check-expectations',
],
default_actions=[
'purge',
'checkout-tools',
'checkout-source',
'get-blobs',
'clobber-shell',
'configure-shell',
'build-shell',
'clobber-analysis',
'setup-analysis',
'run-analysis',
'collect-analysis-output',
# Temporarily disabled, see bug 1211402
# 'upload-analysis',
'check-expectations',
],
config={
'default_vcs': 'hg',
'vcs_share_base': os.environ.get('HG_SHARE_BASE_DIR'),
'ccache': True,
'buildbot_json_path': os.environ.get('PROPERTIES_FILE'),
'tools_repo': 'https://hg.mozilla.org/build/tools',
'upload_ssh_server': None,
'upload_remote_basepath': None,
'enable_try_uploads': True,
'source': None,
'stage_product': 'firefox',
},
)
self.buildid = None
self.create_virtualenv()
self.analysis = HazardAnalysis()
def _pre_config_lock(self, rw_config):
if self.config['source']:
self.config['srcdir'] = self.config['source']
super(SpidermonkeyBuild, self)._pre_config_lock(rw_config)
if self.buildbot_config is None:
self.info("Reading buildbot build properties...")
self.read_buildbot_config()
if self.buildbot_config:
bb_props = [('mock_target', 'mock_target', None),
('hgurl', 'hgurl', None),
('clobberer_url', 'clobberer_url', 'https://api.pub.build.mozilla.org/clobberer/lastclobber'),
('force_clobber', 'force_clobber', None),
('branch', 'blob_upload_branch', None),
]
buildbot_props = self.buildbot_config.get('properties', {})
for bb_prop, cfg_prop, default in bb_props:
if not self.config.get(cfg_prop) and buildbot_props.get(bb_prop, default):
self.config[cfg_prop] = buildbot_props.get(bb_prop, default)
self.config['is_automation'] = True
else:
self.config['is_automation'] = False
self.config.setdefault('blob_upload_branch', 'devel')
dirs = self.query_abs_dirs()
replacements = self.config['env_replacements'].copy()
        for k, v in replacements.items():
replacements[k] = v % dirs
self.env = self.query_env(replace_dict=replacements,
partial_env=self.config['partial_env'],
purge_env=nuisance_env_vars)
self.env['MOZ_UPLOAD_DIR'] = dirs['abs_blob_upload_dir']
self.env['TOOLTOOL_DIR'] = dirs['abs_work_dir']
def query_abs_dirs(self):
if self.abs_dirs:
return self.abs_dirs
abs_dirs = BaseScript.query_abs_dirs(self)
abs_work_dir = abs_dirs['abs_work_dir']
dirs = {
'shell_objdir':
os.path.join(abs_work_dir, self.config['shell-objdir']),
'mozharness_scriptdir':
os.path.abspath(os.path.dirname(__file__)),
'abs_analysis_dir':
os.path.join(abs_work_dir, self.config['analysis-dir']),
'abs_analyzed_objdir':
os.path.join(abs_work_dir, self.config['srcdir'], self.config['analysis-objdir']),
'analysis_scriptdir':
os.path.join(self.config['srcdir'], self.config['analysis-scriptdir']),
'abs_tools_dir':
os.path.join(abs_dirs['base_work_dir'], 'tools'),
'gecko_src':
os.path.join(abs_work_dir, self.config['srcdir']),
'abs_blob_upload_dir':
os.path.join(abs_work_dir, 'blobber_upload_dir'),
}
abs_dirs.update(dirs)
self.abs_dirs = abs_dirs
return self.abs_dirs
def query_repo(self):
if self.config.get('repo'):
return self.config['repo']
elif self.buildbot_config and 'properties' in self.buildbot_config:
return self.config['hgurl'] + self.buildbot_config['properties']['repo_path']
else:
return None
def query_revision(self):
if 'revision' in self.buildbot_properties:
revision = self.buildbot_properties['revision']
elif self.buildbot_config and 'sourcestamp' in self.buildbot_config:
revision = self.buildbot_config['sourcestamp']['revision']
else:
# Useful for local testing. In actual use, this would always be
# None.
revision = self.config.get('revision')
return revision
def query_branch(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
return self.buildbot_config['properties']['branch']
elif 'branch' in self.config:
# Used for locally testing try vs non-try
return self.config['branch']
else:
return os.path.basename(self.query_repo())
def query_compiler_manifest(self):
dirs = self.query_abs_dirs()
manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'], self.config['compiler_manifest'])
if os.path.exists(manifest):
return manifest
return os.path.join(dirs['abs_work_dir'], self.config['compiler_manifest'])
def query_sixgill_manifest(self):
dirs = self.query_abs_dirs()
manifest = os.path.join(dirs['abs_work_dir'], dirs['analysis_scriptdir'], self.config['sixgill_manifest'])
if os.path.exists(manifest):
return manifest
return os.path.join(dirs['abs_work_dir'], self.config['sixgill_manifest'])
def query_buildid(self):
if self.buildid:
return self.buildid
if self.buildbot_config and 'properties' in self.buildbot_config:
self.buildid = self.buildbot_config['properties'].get('buildid')
if not self.buildid:
self.buildid = datetime.now().strftime("%Y%m%d%H%M%S")
return self.buildid
def query_upload_ssh_server(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
return self.buildbot_config['properties']['upload_ssh_server']
else:
return self.config['upload_ssh_server']
def query_upload_ssh_key(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
key = self.buildbot_config['properties']['upload_ssh_key']
else:
key = self.config['upload_ssh_key']
if self.mock_enabled and not key.startswith("/"):
key = "/home/mock_mozilla/.ssh/" + key
return key
def query_upload_ssh_user(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
return self.buildbot_config['properties']['upload_ssh_user']
else:
return self.config['upload_ssh_user']
def query_product(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
return self.buildbot_config['properties']['product']
else:
return self.config['product']
def query_upload_remote_basepath(self):
if self.config.get('upload_remote_basepath'):
return self.config['upload_remote_basepath']
else:
return "/pub/mozilla.org/{product}".format(
product=self.query_product(),
)
def query_upload_remote_baseuri(self):
baseuri = self.config.get('upload_remote_baseuri')
if self.buildbot_config and 'properties' in self.buildbot_config:
buildprops = self.buildbot_config['properties']
if 'upload_remote_baseuri' in buildprops:
baseuri = buildprops['upload_remote_baseuri']
return baseuri.strip("/") if baseuri else None
def query_target(self):
if self.buildbot_config and 'properties' in self.buildbot_config:
return self.buildbot_config['properties']['platform']
else:
return self.config.get('target')
def query_upload_path(self):
branch = self.query_branch()
common = {
'basepath': self.query_upload_remote_basepath(),
'branch': branch,
'target': self.query_target(),
}
if branch == 'try':
if not self.config['enable_try_uploads']:
return None
try:
user = self.buildbot_config['sourcestamp']['changes'][0]['who']
except (KeyError, TypeError):
user = "unknown"
return "{basepath}/try-builds/{user}-{rev}/{branch}-{target}".format(
user=user,
rev=self.query_revision(),
**common
)
else:
return "{basepath}/tinderbox-builds/{branch}-{target}/{buildid}".format(
buildid=self.query_buildid(),
**common
)
def query_do_upload(self):
if self.query_branch() == 'try':
return self.config.get('enable_try_uploads')
return True
# Actions {{{2
def purge(self):
dirs = self.query_abs_dirs()
self.info("purging, abs_upload_dir=" + dirs['abs_upload_dir'])
PurgeMixin.clobber(
self,
always_clobber_dirs=[
dirs['abs_upload_dir'],
],
)
def checkout_tools(self):
dirs = self.query_abs_dirs()
# If running from within a directory also passed as the --source dir,
# this has the danger of clobbering <source>/tools/
if self.config['source']:
srcdir = self.config['source']
if os.path.samefile(srcdir, os.path.dirname(dirs['abs_tools_dir'])):
raise Exception("Cannot run from source checkout to avoid overwriting subdirs")
rev = self.vcs_checkout(
vcs='hg',
branch="default",
repo=self.config['tools_repo'],
clean=False,
dest=dirs['abs_tools_dir'],
)
self.set_buildbot_property("tools_revision", rev, write_to_file=True)
def do_checkout_source(self):
# --source option means to use an existing source directory instead of checking one out.
if self.config['source']:
return
dirs = self.query_abs_dirs()
dest = dirs['gecko_src']
# Pre-create the directory to appease the share extension
if not os.path.exists(dest):
self.mkdir_p(dest)
rev = self.vcs_checkout(
repo=self.query_repo(),
dest=dest,
revision=self.query_revision(),
branch=self.config.get('branch'),
clean=True,
)
self.set_buildbot_property('source_revision', rev, write_to_file=True)
def checkout_source(self):
try:
self.do_checkout_source()
except Exception as e:
self.fatal("checkout failed: " + str(e), exit_code=RETRY)
def get_blobs(self):
work_dir = self.query_abs_dirs()['abs_work_dir']
if not os.path.exists(work_dir):
self.mkdir_p(work_dir)
self.tooltool_fetch(self.query_compiler_manifest(), output_dir=work_dir)
self.tooltool_fetch(self.query_sixgill_manifest(), output_dir=work_dir)
def clobber_shell(self):
self.analysis.clobber_shell(self)
def configure_shell(self):
self.enable_mock()
try:
self.analysis.configure_shell(self)
except HazardError as e:
self.fatal(e, exit_code=FAILURE)
self.disable_mock()
def build_shell(self):
self.enable_mock()
try:
self.analysis.build_shell(self)
except HazardError as e:
self.fatal(e, exit_code=FAILURE)
self.disable_mock()
def clobber_analysis(self):
self.analysis.clobber(self)
def setup_analysis(self):
self.analysis.setup(self)
def run_analysis(self):
self.enable_mock()
upload_dir = self.query_abs_dirs()['abs_blob_upload_dir']
if not os.path.exists(upload_dir):
self.mkdir_p(upload_dir)
env = self.env.copy()
env['MOZ_UPLOAD_DIR'] = upload_dir
try:
self.analysis.run(self, env=env, error_list=MakefileErrorList)
except HazardError as e:
self.fatal(e, exit_code=FAILURE)
self.disable_mock()
def collect_analysis_output(self):
self.analysis.collect_output(self)
def upload_analysis(self):
if not self.config['is_automation']:
return
if not self.query_do_upload():
self.info("Uploads disabled for this build. Skipping...")
return
self.enable_mock()
try:
self.analysis.upload_results(self)
except HazardError as e:
self.error(e)
self.return_code = WARNINGS
self.disable_mock()
def check_expectations(self):
try:
self.analysis.check_expectations(self)
except HazardError as e:
self.fatal(e, exit_code=FAILURE)
# main {{{1
if __name__ == '__main__':
myScript = SpidermonkeyBuild()
myScript.run_and_exit()
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozharness/scripts/spidermonkey_build.py
|
Python
|
mpl-2.0
| 17,369
|
"""
各领域中自定义错误类型的基类
有时候并不关注具体的错误,只需要知道错误的类型,这时候可以对这里定义的基类进行异常处理
"""
from everyclass.server.utils.api_helpers import STATUS_CODE_PERMISSION_DENIED, STATUS_CODE_INVALID_REQUEST, STATUS_CODE_INTERNAL_ERROR
class BizException(Exception):
"""所有业务错误类的基类,初始化时需要携带status_message和业务status_code"""
def __init__(self, status_message: str, status_code: int):
self.status_code = status_code
self.status_message = status_message
class PermissionException(BizException):
"""权限相关错误"""
def __init__(self, status_message, status_code: int = None):
super().__init__(status_message, status_code if status_code else STATUS_CODE_PERMISSION_DENIED)
class InvalidRequestException(BizException):
"""请求无效"""
def __init__(self, status_message, status_code: int = None):
super().__init__(status_message, status_code if status_code else STATUS_CODE_INVALID_REQUEST)
class InternalError(BizException):
"""内部错误"""
def __init__(self, status_message, status_code: int = None):
super().__init__(status_message, status_code if status_code else STATUS_CODE_INTERNAL_ERROR)
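# Hedged usage sketch (not part of the original module; the helpers named below
# are hypothetical): callers that only care about the error category can catch
# the base classes, e.g.:
#
#   try:
#       check_permission(user)
#   except PermissionException as e:
#       return error_response(e.status_code, e.status_message)
#   except BizException as e:
#       report_internal_error(e)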
|
fr0der1c/EveryClass-server
|
everyclass/server/utils/base_exceptions.py
|
Python
|
mpl-2.0
| 1,307
|
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use settings_local.py
from funfactory.settings_base import *
import dj_database_url
from decouple import config, Csv
ALLOWED_HOSTS = config('ALLOWED_HOSTS', '', cast=Csv())
# This unsets DATABASE_ROUTERS from funfactory because we're not
# interested in using multiple databases for the webapp part.
DATABASE_ROUTERS = ()
# Name of the top-level module where you put all your apps.
# If you did not install Playdoh with the funfactory installer script
# you may need to edit this value. See the docs about installing from a
# clone.
PROJECT_MODULE = 'crashstats'
# Defines the views served for root URLs.
ROOT_URLCONF = '%s.urls' % PROJECT_MODULE
INSTALLED_APPS = (
'funfactory',
'compressor',
'django_browserid',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'commonware.response.cookies',
'south', # important that django_nose comes AFTER this one
'django_nose',
'session_csrf',
# Application base, containing global templates.
'%s.base' % PROJECT_MODULE,
# Example code. Can (and should) be removed for actual projects.
'%s.dataservice' % PROJECT_MODULE,
'%s.crashstats' % PROJECT_MODULE,
'%s.api' % PROJECT_MODULE,
'%s.manage' % PROJECT_MODULE,
'%s.supersearch' % PROJECT_MODULE,
'%s.signature' % PROJECT_MODULE,
'%s.auth' % PROJECT_MODULE,
'%s.tokens' % PROJECT_MODULE,
'%s.symbols' % PROJECT_MODULE,
'django.contrib.messages',
'raven.contrib.django.raven_compat',
'waffle',
'eventlog',
)
funfactory_JINJA_CONFIG = JINJA_CONFIG  # the one from funfactory
def JINJA_CONFIG():
    # differs from funfactory's version in that we don't want to
    # load the `tower` extension
config = funfactory_JINJA_CONFIG()
config['extensions'].remove('tower.template.i18n')
return config
def COMPRESS_JINJA2_GET_ENVIRONMENT():
from jingo import env
from compressor.contrib.jinja2ext import CompressorExtension
env.add_extension(CompressorExtension)
return env
# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = (
'browserid',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'session_csrf.CsrfMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'commonware.middleware.FrameOptionsHeader',
'waffle.middleware.WaffleMiddleware',
'ratelimit.middleware.RatelimitMiddleware',
'%s.tokens.middleware.APIAuthenticationMiddleware' % PROJECT_MODULE,
'%s.crashstats.middleware.Propagate400Errors' % PROJECT_MODULE,
)
# BrowserID configuration
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_browserid.auth.BrowserIDBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'crashstats.base.context_processors.google_analytics',
'crashstats.base.context_processors.pingdom_rum',
'crashstats.base.context_processors.browserid',
)
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
LOGGING = dict(loggers=dict(playdoh={'level': logging.DEBUG}))
# Some products have a different name in bugzilla and Socorro.
BUG_PRODUCT_MAP = {
'FennecAndroid': 'Firefox for Android',
'B2G': 'Firefox OS',
}
# Link to source if possible
VCS_MAPPINGS = {
'cvs': {
'cvs.mozilla.org': ('http://bonsai.mozilla.org/cvsblame.cgi?'
'file=%(file)s&rev=%(revision)s&'
'mark=%(line)s#%(line)s')
},
'hg': {
'hg.mozilla.org': ('http://hg.mozilla.org/%(repo)s'
'/annotate/%(revision)s/%(file)s#l%(line)s')
},
'git': {
'git.mozilla.org': ('http://git.mozilla.org/?p=%(repo)s;a=blob;'
'f=%(file)s;h=%(revision)s#l%(line)s'),
'github.com': ('https://github.com/%(repo)s/blob/%(revision)s/'
'%(file)s#L%(line)s')
}
}
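# For example (values hypothetical), an hg mapping expands via %-interpolation:
#
#   VCS_MAPPINGS['hg']['hg.mozilla.org'] % {
#       'repo': 'mozilla-central', 'revision': 'abc123',
#       'file': 'dom/base/nsDocument.cpp', 'line': 42,
#   }
#   # -> http://hg.mozilla.org/mozilla-central/annotate/abc123/dom/base/nsDocument.cpp#l42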
# Identifies nightly releases
NIGHTLY_RELEASE_TYPES = (
'Aurora',
'Nightly',
)
# No need to load it because we don't do i18n in this project
USE_I18N = False
# True if old legacy URLs we handle should be permanent 301 redirects.
# Transitionally it might be safer to set this to False as we roll out the new
# django re-write of Socorro.
PERMANENT_LEGACY_REDIRECTS = True
LOGIN_URL = '/login/'
# Use memcached for session storage
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# we don't need bcrypt since we don't store real passwords
PWD_ALGORITHM = 'sha512'
# must be set but not applicable because we don't use bcrypt
HMAC_KEYS = {'any': 'thing'}
# Types of query that can be run in search
QUERY_TYPES = (
'contains',
'is_exactly',
'starts_with',
'simple',
'exact', # for backward compatibility
'startswith', # for backward compatibility
)
# This is for backward compatibility with the PHP app.
QUERY_TYPES_MAP = {
'exact': 'is_exactly',
'startswith': 'starts_with',
}
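# e.g. an incoming legacy query type gets normalized before use:
#
#   QUERY_TYPES_MAP.get('exact', 'exact')  # -> 'is_exactly'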
# Maximum and default range of query that can be run in search
QUERY_RANGE_MAXIMUM_DAYS = 30
QUERY_RANGE_MAXIMUM_DAYS_ADMIN = 120
QUERY_RANGE_DEFAULT_DAYS = 14
# range unit values to allow in queries
RANGE_UNITS = (
'weeks',
'days',
'hours',
)
# process types to allow in queries
PROCESS_TYPES = (
'any',
'browser',
'plugin',
'content',
'all', # alias for 'any'
)
# hang types to allow in queries
HANG_TYPES = (
'any',
'crash',
'hang',
'all', # alias for 'any'
)
# plugin fields to allow in queries
PLUGIN_FIELDS = (
'filename',
'name',
)
# fields used in the simplified UI for Super Search
SIMPLE_SEARCH_FIELDS = (
'product',
'version',
'platform',
'process_type',
)
# the result-count filter options on TCBS (top crashers by signature)
TCBS_RESULT_COUNTS = (
'50',
'100',
'200',
'300'
)
# channels allowed in middleware calls,
# such as adu by signature
CHANNELS = (
'release',
'beta',
'aurora',
'nightly',
'esr'
)
# this is the max length of signatures in forms
SIGNATURE_MAX_LENGTH = 255
# We use django.contrib.messages for login, so let's use SessionStorage
# to avoid stuffing big messages into cookies
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# A prefix that is sometimes prepended to the crash ID when used elsewhere in
# the socorro eco-system.
CRASH_ID_PREFIX = 'bp-'
# If true, allow robots to spider the site
ENGAGE_ROBOTS = False
# Base URL for when we use the Bugzilla API
BZAPI_BASE_URL = 'https://bugzilla.mozilla.org/rest'
# Specify the search middleware implementation to use.
# Leave empty to use the default.
SEARCH_MIDDLEWARE_IMPL = None
# The index schema used in our elasticsearch databases, used in the
# Super Search Custom Query page.
ELASTICSEARCH_INDEX_SCHEMA = 'socorro%Y%W'
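# Editorial note: the schema reads like a strftime pattern, so a weekly
# index name would presumably be derived along these lines (sketch only):
# import datetime
# datetime.date(2016, 7, 24).strftime(ELASTICSEARCH_INDEX_SCHEMA)
# # -> 'socorro201629'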
# Valid type for correlations reports
CORRELATION_REPORT_TYPES = (
'core-counts',
'interesting-addons',
'interesting-addons-with-versions',
'interesting-modules',
'interesting-modules-with-versions'
)
# Default number of crashes to show on the Exploitable Crashes report
EXPLOITABILITY_BATCH_SIZE = 250
# Default number of days to show in explosive crashes reports
EXPLOSIVE_REPORT_DAYS = 10
# how many seconds to sleep when getting a ConnectionError
MIDDLEWARE_RETRY_SLEEPTIME = 3
# how many times to re-attempt on ConnectionError after some sleep
MIDDLEWARE_RETRIES = 10
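# Sketch of the retry loop these two settings presumably drive (editorial
# illustration; do_request and the ConnectionError's origin are assumptions):
# import time
# for _ in range(MIDDLEWARE_RETRIES):
#     try:
#         result = do_request()
#         break
#     except ConnectionError:  # e.g. requests.exceptions.ConnectionError
#         time.sleep(MIDDLEWARE_RETRY_SLEEPTIME)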
# Overridden so we can control the redirects better
BROWSERID_VERIFY_CLASS = '%s.auth.views.CustomBrowserIDVerify' % PROJECT_MODULE
# For a more friendly Persona pop-up
BROWSERID_REQUEST_ARGS = {'siteName': 'Mozilla Crash Reports'}
# Default number of days a token lasts until it expires
TOKENS_DEFAULT_EXPIRATION_DAYS = 90
# Store all dates timezone aware
USE_TZ = True
# Default for how many items to display in the admin batch tables
USERS_ADMIN_BATCH_SIZE = 10
EVENTS_ADMIN_BATCH_SIZE = 10
API_TOKENS_ADMIN_BATCH_SIZE = 10
# Individual strings that aren't allowed to appear on any line in the
# content of an uploaded symbols archive file.
DISALLOWED_SYMBOLS_SNIPPETS = (
# https://bugzilla.mozilla.org/show_bug.cgi?id=1012672
'qcom/proprietary',
)
# Rate limit for when using the Web API for anonymous hits
API_RATE_LIMIT = '100/m'
# Rate limit when using the supersearch web interface
RATELIMIT_SUPERSEARCH = '10/m'
# Path to the view that gets executed if you hit upon a ratelimit block
RATELIMIT_VIEW = 'crashstats.crashstats.views.ratelimit_blocked'
# When we pull platforms from the Platforms API we later decide which of
# these to display at various points in the UI.
DISPLAY_OS_NAMES = ['Windows', 'Mac OS X', 'Linux']
# When this is true, every 400 Bad Request error we get from the middleware
# is propagated onto the client who caused the request in the webapp.
PROPAGATE_MIDDLEWARE_400_ERRORS = True
DATASERVICE_CONFIG_BASE = {
'resource': {
'postgresql': {
'transaction_executor_class':
'socorro.database.transaction_executor'
'.TransactionExecutorWithLimitedBackoff',
'backoff_delays': "0, 3",
},
},
'secrets': {
'postgresql': {
'database_password': 'aPassword',
'database_username': 'test',
},
}
}
# We don't want to test the migrations when we run tests.
# We trust that syncdb matches what you'd get if you install
# all the migrations.
SOUTH_TESTS_MIGRATE = False
# To extend any settings from above here's an example:
# INSTALLED_APPS = base.INSTALLED_APPS + ['debug_toolbar']
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# import logging
# LOGGING = dict(loggers=dict(playdoh={'level': logging.DEBUG}))
# If you run crashstats behind a load balancer, your `REMOTE_ADDR` header
# will be that of the load balancer instead of the actual user.
# The solution is to instead rely on the `X-Forwarded-For` header.
# You ONLY want this if you know you can trust `X-Forwarded-For`.
# (Note! Make sure you uncomment the line `from . import base` at
# the top of this file first)
# base.MIDDLEWARE_CLASSES += (
# 'crashstats.crashstats.middleware.SetRemoteAddrFromForwardedFor',
# )
# When you don't have permission to upload Symbols you might be confused
# what to do next. On the page that explains that you don't have permission
# there's a chance to put a link
# SYMBOLS_PERMISSION_HINT_LINK = {
# 'url': 'https://bugzilla.mozilla.org/enter_bug.cgi?product=Socorro&'
# 'component=General&groups=client-services-security',
# 'label': 'File a bug in bugzilla'
# }
# To change the configuration for any dataservice object, you may set
# parameters in the DATASERVICE_CONFIG_BASE which is used by dataservice
# app. Detailed config is documented in each dataservice object imported
# by the app.
#
# Below is an example of changing the api_whitelist for the Bugs service
# We convert the dict to a string, as configman prefers a string here.
# import json
# DATASERVICE_CONFIG_BASE.update({
# 'services': {
# 'Bugs': {
# 'api_whitelist': json.dumps({
# 'hits': ('id','signature',)
# })
# }
# }
# })
# to override the content type of specific file extensions:
SYMBOLS_MIME_OVERRIDES = {
'sym': 'text/plain'
}
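# Hypothetical lookup (not in the original file) showing how an override
# might take precedence over the stdlib guess:
# import mimetypes
# def guess_symbols_mime(filename):
#     ext = filename.rsplit('.', 1)[-1].lower()
#     return SYMBOLS_MIME_OVERRIDES.get(ext, mimetypes.guess_type(filename)[0])
# guess_symbols_mime('firefox.sym')  # -> 'text/plain'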
# ------------------------------------------------
# Below are settings that can be overridden using
# environment variables.
CACHE_MIDDLEWARE = config('CACHE_MIDDLEWARE', False, cast=bool)
# creates "./models-cache" dir
# only applicable if CACHE_MIDDLEWARE is True
CACHE_MIDDLEWARE_FILES = config('CACHE_MIDDLEWARE_FILES', True, cast=bool)
# Socorro middleware instance to use
MWARE_BASE_URL = config('MWARE_BASE_URL', 'http://localhost:5200')
MWARE_USERNAME = config('MWARE_USERNAME', None)
MWARE_PASSWORD = config('MWARE_PASSWORD', None)
# HTTP/1.1 Host header to pass - in case this is a VHost
MWARE_HTTP_HOST = config('MWARE_HTTP_HOST', None)
DEFAULT_PRODUCT = config('DEFAULT_PRODUCT', 'WaterWolf')
# can be changed from the null client to the log client to test something
# locally; or, if you're using the debug toolbar, give the toolbar client a try
STATSD_CLIENT = config('STATSD_CLIENT', 'django_statsd.clients.null')
# for local development these don't matter
STATSD_HOST = config('STATSD_HOST', 'localhost')
STATSD_PORT = config('STATSD_PORT', 8125, cast=int)
STATSD_PREFIX = config('STATSD_PREFIX', None)
# Enable this to be able to run tests.
# NB: disable this caching mechanism in production environments, as it
# breaks anonymous CSRF when there is more than one web server thread.
# Comment out to use memcache from settings/base.py.
CACHES = {
'default': {
# use django.core.cache.backends.locmem.LocMemCache for prod
'BACKEND': config(
'CACHE_BACKEND',
'django.core.cache.backends.memcached.MemcachedCache',
),
# fox2mike suggests using an IP instead of localhost
'LOCATION': config('CACHE_LOCATION', '127.0.0.1:11211'),
'TIMEOUT': config('CACHE_TIMEOUT', 500),
'KEY_PREFIX': config('CACHE_KEY_PREFIX', 'crashstats'),
}
}
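# Quick sanity check (illustrative only) using Django's cache API, which
# reads the 'default' alias configured above:
# from django.core.cache import cache
# cache.set('ping', 'pong', 30)  # honors KEY_PREFIX; explicit 30s timeout
# cache.get('ping')              # -> 'pong'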
TIME_ZONE = config('TIME_ZONE', 'UTC')
# Only use the old way of setting DATABASES if you haven't fully migrated yet
if (
not config('DATABASE_URL', None) and (
config('DATABASE_ENGINE', None) or
config('DATABASE_NAME', None) or
config('DATABASE_USER', None) or
config('DATABASE_PASSWORD', None) or
config('DATABASE_PORT', None)
)
):
# Database credentials set up the old way
import warnings
warnings.warn(
"Use DATABASE_URL instead of depending on DATABASE_* settings",
DeprecationWarning
)
DATABASES = {
'default': {
# use django.db.backends.postgresql_psycopg2 for production
'ENGINE': config('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': config('DATABASE_NAME', 'sqlite.crashstats.db'),
'USER': config('DATABASE_USER', ''),
'PASSWORD': config('DATABASE_PASSWORD', ''),
'HOST': config('DATABASE_HOST', ''),
'PORT': config('DATABASE_PORT', ''),
'OPTIONS': {
},
# 'TEST_CHARSET': 'utf8',
# 'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
else:
DATABASES = {
'default': config(
'DATABASE_URL',
'sqlite://sqlite.crashstats.db',
cast=dj_database_url.parse
)
}
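# For reference (an assumption based on dj_database_url conventions), a
# PostgreSQL DATABASE_URL matching the defaults further down would be:
# DATABASE_URL=postgres://breakpad_rw:aPassword@localhost:5432/breakpad
# which dj_database_url.parse() expands into the ENGINE/NAME/USER/... dict.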
# Set this to all slave DBs in use on the site (comma-separated).
SLAVE_DATABASES = config('SLAVE_DATABASES', '', cast=Csv())
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = config('DEBUG', False, cast=bool)
# Set this to True to make debugging AJAX requests easier; development-only!
DEBUG_PROPAGATE_EXCEPTIONS = config(
'DEBUG_PROPAGATE_EXCEPTIONS',
False,
cast=bool
)
COMPRESS_ENABLED = config('COMPRESS_ENABLED', True, cast=bool)
# By default compression is done at runtime. If you enable offline
# compression, running the test suite will be 10 times faster, but
# you'll need to remember to first run:
# ./manage.py collectstatic --noinput
# ./manage.py compress --force --engine=jinja2
# at least once every time any of the static files change.
COMPRESS_OFFLINE = config('COMPRESS_OFFLINE', True, cast=bool)
# Make this unique, and don't share it with anybody. It cannot be blank.
# FIXME remove this default when we are out of PHX
SECRET_KEY = config('SECRET_KEY', 'this must be changed!!')
# Log settings
# Make this unique to your project.
SYSLOG_TAG = config('SYSLOG_TAG', 'http_app_playdoh')
# Common Event Format logging parameters
CEF_PRODUCT = config('CEF_PRODUCT', 'Playdoh')
CEF_VENDOR = config('CEF_VENDOR', 'Mozilla')
# If you intend to run WITHOUT HTTPS, such as local development,
# then set this to False
SESSION_COOKIE_SECURE = config('SESSION_COOKIE_SECURE', True, cast=bool)
# To get your Sentry key, go to https://errormill.mozilla.org/
RAVEN_CONFIG = {
'dsn': config('RAVEN_DSN', '') # see https://errormill.mozilla.org/
}
# Specify the search middleware implementation to use
SEARCH_MIDDLEWARE_IMPL = config('SEARCH_MIDDLEWARE_IMPL', 'elasticsearch')
# If you intend to run with DEBUG=False, this must match the URL
# you're using
BROWSERID_AUDIENCES = config(
'BROWSERID_AUDIENCES',
'http://localhost:8000',
cast=Csv()
)
# Optional Google Analytics ID (UA-XXXXX-X)
GOOGLE_ANALYTICS_ID = config('GOOGLE_ANALYTICS_ID', None)
# Root domain. Required iff you're providing an analytics ID.
GOOGLE_ANALYTICS_DOMAIN = config('GOOGLE_ANALYTICS_DOMAIN', 'auto')
# Optional Pingdom Real User Monitoring ID
PINGDOM_RUM_ID = config('PINGDOM_RUM_ID', None)
# Set to True to enable analysis of all model fetches
ANALYZE_MODEL_FETCHES = config('ANALYZE_MODEL_FETCHES', False, cast=bool)
# Dataservice API configuration
# Extend dataservices settings from settings/base.py here
# At a minimum, you'll probably want to change db username/password. All
# dataservice objects inherit resource configuration and so can all
# have their database resource configuration set once in 'secrets.postgresql'
# and 'resource.postgresql' keys.
DATASERVICE_CONFIG_BASE.update({
'secrets': {
'postgresql': {
'database_password': config(
'DATASERVICE_DATABASE_PASSWORD',
'aPassword'
),
'database_username': config(
'DATASERVICE_DATABASE_USERNAME',
'breakpad_rw'
),
'database_hostname': config(
'DATASERVICE_DATABASE_HOSTNAME',
'localhost'
),
'database_name': config(
'DATASERVICE_DATABASE_NAME',
'breakpad'
),
'database_port': config(
'DATASERVICE_DATABASE_PORT',
'5432'
),
}
}
})
# Credentials for being able to make an S3 connection
AWS_ACCESS_KEY = config('AWS_ACCESS_KEY', '')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY', '')
# Information for uploading symbols to S3
SYMBOLS_BUCKET_DEFAULT_NAME = config('SYMBOLS_BUCKET_DEFAULT_NAME', '')
# To override the bucket for specific email addresses, use:
SYMBOLS_BUCKET_EXCEPTIONS = {
# e.g.
# 'joe.bloggs@example.com': 'private-crashes.my-bucket'
# or you can specify it as a tuple of (name, location)
# 'joe@example.com': ('my-bucket', 'USWest1')
config('SYMBOLS_BUCKET_EXCEPTIONS_USER', ''):
config('SYMBOLS_BUCKET_EXCEPTIONS_BUCKET', '')
}
SYMBOLS_FILE_PREFIX = config('SYMBOLS_FILE_PREFIX', 'v1')
# e.g. "us-west-2" see boto.s3.connection.Location
# Only needed if the bucket has never been created
SYMBOLS_BUCKET_DEFAULT_LOCATION = config(
'SYMBOLS_BUCKET_DEFAULT_LOCATION',
None
)
| rhelmer/socorro | webapp-django/crashstats/settings/base.py | Python | mpl-2.0 | 19,647 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-24 03:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_description'),
]
operations = [
migrations.AlterField(
model_name='post',
name='description',
field=models.TextField(blank=True, max_length=500),
),
]
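# Editorial note (not part of the original migration): on a TextField,
# max_length is enforced only by forms and validators; it does not alter
# the underlying database column. The migration is applied the usual way:
# python manage.py migrate blog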
| iannesbitt/iannesbitt.org | blog/migrations/0003_auto_20160723_2321.py | Python | mpl-2.0 | 461 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Pierre Saikaly (saikalypierre@gmail.com)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#===========================#
# created on 07 sept 2016
#===========================#
# Import Python dependencies :
# ----------------------------
import os
import sys
import time
import argparse
# Script :
# --------
def CheckEurostagState(folder):
"""
Reads the Eurostag output file and checks whether the situation at the
beginning is stable or unstable.
Inputs :
- folder : directory containing the 'sim_pre_fault.out' output file
Outputs :
- 0 -> Situation is stable
- 1 -> Situation is unstable
Used in :
- main
"""
etat = "ETAT D'EQUILIBRE A 0.10000D-02 VERIFIE POUR LES EQUATIONS MACHINE"
etat2 = "ETAT D'EQUILIBRE A 0.10000D-02 NON VERIFIE DANS LES EQUATIONS SUIVANTES"
eurostagOutputFile = os.path.join(folder,"sim_pre_fault.out")
# Checking Eurostag status :
# --------------------------
with open(eurostagOutputFile, 'U') as outfile:
content = outfile.read()
if etat in content:
# Eurostag returns stable
status = 0
else:
# Eurostag returns unstable :
# ---------------------------
equi_value = []
with open(eurostagOutputFile, 'U') as outfile:
lines = outfile.readlines()
for i, line in enumerate(lines):
if etat2 in line:
k = i+5
while (lines[k]!="1\n" and lines[k]!="\n"):
machine_name = lines[k].split()[0]
value_line = lines[k].split()[-2]
value_real = float(value_line.split("D")[0])*10**int(value_line.split("D")[1])
if (abs(value_real) > 0.01):
with open('ecart_groupe.csv','a') as egf:
egf.write(folder + ';' + machine_name + ';' + str(value_real) + '\n')
equi_value.append(abs(value_real))
k += 1
# Checking tolerance :
# --------------------
if (len(equi_value) > 0):
if (max(equi_value) < 0.01):
status = 0 # Stable state within tolerance
else:
status = 1 # Unstable state
else:
status = 0 # Stable state
return status
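# Editorial sketch (not in the original script): the value parsing above
# converts Fortran-style 'D' exponents by hand; the same logic as a helper:
# def fortran_float(text):
#     """Convert e.g. '0.10000D-02' to 0.001."""
#     mantissa, exponent = text.split("D")
#     return float(mantissa) * 10 ** int(exponent)
# fortran_float("0.10000D-02")  # -> 0.001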
def main():
"""
Gets the list of Eurostag simulation folders and checks whether those
situations are stable or not. The outputs are stored in the eurostag_status.txt file.
"""
# Creating output file :
# ----------------------
print "Beginning writing outputs for folders in directory..."
with open('eurostag_status.txt','w') as f:
f.write("# " + time.strftime("%d/%m/%y") + "\n")
with open('ecart_groupe.csv','w') as egf:
egf.write(time.strftime("%d/%m/%y") + "\n")
egf.write("Situations;groupe;valeur\n")
# Checking folders and writing output :
# -------------------------------------
with open('eurostag_status.txt','a') as f:
for folder in os.listdir('.'):
if folder.startswith('itesla_eurostag_stabilization_'):
f.write(folder + " " + str(CheckEurostagState(folder)) + '\n')
print "Done writing outputs in eurostag_status.txt"
print "Done analysis in ecart_groupe.csv"
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Check the Eurostag status of the working directory or of a specific folder')
parser.add_argument('-p','--eur_path', help='path of eurostag folder', required=False)
args = parser.parse_args()
if (args.eur_path):
with open('ecart_groupe.csv','w') as egf:
egf.write(time.strftime("%d/%m/%y") + "\n")
egf.write("Situations;groupe;valeur\n")
print args.eur_path, ' ', CheckEurostagState(args.eur_path)
print "Done analysis in ecart_groupe.csv"
else:
main()
| PierSaik/RTE-MSDS | EurostagState.py | Python | mpl-2.0 | 3,686 |
from ott.utils.dao.base import BaseDao
from ott.utils import file_utils
from ott.utils import date_utils
from .alerts_dao import AlertsDao
from sqlalchemy.orm import object_session
from gtfsdb import Route, CurrentRoutes, util
try: Route.make_geom_lazy()
except Exception: pass
import os
import logging
log = logging.getLogger(__file__)
class RouteDao(BaseDao):
""" RouteDao data object ready for marshaling into JSON
"""
def __init__(self, route, alerts, show_geo=False):
super(RouteDao, self).__init__()
self.copy(route, show_geo)
self.set_alerts(alerts)
def copy(self, r, show_geo):
self.name = r.route_name
self.route_id = r.route_id
self.short_name = r.route_short_name
self.sort_order = r.route_sort_order
self.url = getattr(r, 'route_url', None)
self.add_route_dirs(r)
if show_geo:
self.geom = self.orm_to_geojson(r)
def add_route_dirs(self, route):
""" add the direction names to route
"""
# step 0: two direction name vars
dir0 = None
dir1 = None
# step 1: figure out what (if any) 'primary' direction names for this route exist in directions '0' and '1'
try:
for d in route.directions:
if d.direction_id == 0:
dir0 = d.direction_name
elif d.direction_id == 1:
dir1 = d.direction_name
except Exception:
pass
# step 2: assign direction names (or default null values) to route
self.copy_dirs(dir0, dir1)
# TODO we shuld really havea DirectionDao (spellin' intentional)
def copy_dirs(self, dir0=None, dir1=None):
self.direction_0 = dir0
self.direction_1 = dir1
@classmethod
def from_route_orm(cls, route, agency="TODO", detailed=False, show_alerts=False, show_geo=False):
alerts = []
try:
if show_alerts:
alerts = AlertsDao.get_route_alerts(object_session(route), route.route_id)
except Exception as e:
log.warning(e)
ret_val = RouteDao(route, alerts, show_geo)
return ret_val
@classmethod
def from_route_id(cls, session, route_id, agency="TODO", detailed=False, show_alerts=False, show_geo=False):
""" make a RouteDao from a route_id and session
"""
log.info("query Route table")
route = session.query(Route).filter(Route.route_id == route_id).one()
return cls.from_route_orm(route, agency=agency, detailed=detailed, show_alerts=show_alerts, show_geo=show_geo)
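# Illustrative usage (editorial sketch; the session setup is assumed):
# session = ...  # a SQLAlchemy session bound to a gtfsdb database
# dao = RouteDao.from_route_id(session, '100', show_alerts=True)
# dao.name, dao.route_id, dao.direction_0  # attributes set by copy()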
class RouteListDao(BaseDao):
""" List of RouteDao data objects ... both list and RouteDao content ready for marshaling into JSON
"""
def __init__(self, routes):
super(RouteListDao, self).__init__()
self.routes = routes
self.count = len(routes)
@classmethod
def route_list(cls, session, agency="TODO", detailed=False, show_alerts=False, show_geo=False):
""" make a list of RouteDao objects by query to the database
"""
ret_val = None
log.info("query Route table")
route_list = []
routes = cls._active_routes(session)
for r in routes:
rte = RouteDao.from_route_orm(route=r, agency=agency, detailed=detailed, show_alerts=show_alerts, show_geo=show_geo)
route_list.append(rte)
ret_val = RouteListDao(route_list)
return ret_val
@classmethod
def _active_routes(cls, session, agency_id=None, date=None):
"""
find route list from gtfsdb based on input date (default is server date)
"""
# import pdb; pdb.set_trace()
ret_val = []
# step 1: grab all routes
routes = session.query(Route).order_by(Route.route_sort_order).all()
# step 2: get a valid date
date = date_utils.str_to_date(date)
if date:
for r in routes:
if r:
# step 2a: filter based on begin and/or end dates
if r.start_date or r.end_date:
if r.start_date and r.end_date:
if r.start_date <= date <= r.end_date:
ret_val.append(r)
elif r.start_date and r.start_date <= date:
ret_val.append(r)
elif r.end_date and date <= r.end_date:
ret_val.append(r)
else:
# invalid Route dates; can't determine active status, so just treat the route as 'active'
ret_val.append(r)
else:
# step 2b: if no valid input (default) date, just pull all routes into ret_val
ret_val = routes
return ret_val
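# Editorial note: the date window above boils down to this predicate
# (sketch only; missing bounds are treated as open-ended):
# def in_service(route, date):
#     if not (route.start_date or route.end_date):
#         return True  # no dates -> can't filter, treat as active
#     if route.start_date and date < route.start_date:
#         return False
#     if route.end_date and date > route.end_date:
#         return False
#     return True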
class CurrentRoutesListDao(RouteListDao):
"""
List of RouteDao data objects ... directly from the CurrentRoutes tables
"""
def __init__(self, routes):
super(CurrentRoutesListDao, self).__init__(routes)
@classmethod
def _active_routes(cls, session, agency_id=None, date=None):
#import pdb; pdb.set_trace()
try:
ret_val = CurrentRoutes.query_routes(session)
except Exception:
# fall back to the date-based query; pass date by keyword, since the
# next positional parameter of _active_routes is agency_id, not date
ret_val = super(CurrentRoutesListDao, cls)._active_routes(session, date=date)
return ret_val
| OpenTransitTools/data | ott/data/dao/route_dao.py | Python | mpl-2.0 | 5,386 |