repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
prospero78/pyPC | pak_pc/pak_gui/pak_win_idc/__init__.py | Python | lgpl-3.0 | 127 | 0 | # -*- coding: utf | 8 -*-
''' |
Инициализация пакета интерфейса дисковго кластера.
'''
|
robcarver17/pysystemtrade | sysdata/mongodb/mongo_process_control.py | Python | gpl-3.0 | 1,876 | 0.001599 | from sysobjects.production.process_control import controlProcess
from sysdata.production.process_control_data import controlProcessData
| from syscore.objects import arg_not_supplied, missing_data
from sysdata.mongodb.mongo_generic import mongoDataWithSingleKey
from syslogdiag.log_to_screen import logtoscreen
PROCESS_CONTROL_COLLECTION = "process_control"
PROCESS_CO | NTROL_KEY = "process_name"
class mongoControlProcessData(controlProcessData):
"""
Read and write data class to get process control data
"""
def __init__(
self, mongo_db=arg_not_supplied, log=logtoscreen("mongoControlProcessData")
):
super().__init__(log=log)
self._mongo_data = mongoDataWithSingleKey(
PROCESS_CONTROL_COLLECTION, PROCESS_CONTROL_KEY, mongo_db=mongo_db
)
@property
def mongo_data(self):
return self._mongo_data
def __repr__(self):
return "Data connection for process control, mongodb %s" % str(self.mongo_data)
def get_list_of_process_names(self):
return self.mongo_data.get_list_of_keys()
def _get_control_for_process_name_without_default(self, process_name):
result_dict = self.mongo_data.get_result_dict_for_key_without_key_value(
process_name
)
if result_dict is missing_data:
return missing_data
control_object = controlProcess.from_dict(result_dict)
return control_object
def _modify_existing_control_for_process_name(
self, process_name, new_control_object
):
self.mongo_data.add_data(
process_name, new_control_object.as_dict(), allow_overwrite=True
)
def _add_control_for_process_name(self, process_name, new_control_object):
self.mongo_data.add_data(
process_name, new_control_object.as_dict(), allow_overwrite=False
)
|
Mafarricos/Mafarricos-xbmc-addons | plugin.video.videosinfantis/main.py | Python | gpl-2.0 | 4,836 | 0.0366 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# by Mafarricos
# email: MafaStudios@gmail.com
# Thanks to enen92 and fightnight
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############BIBLIOTECAS A IMPORTAR E DEFINICOES####################
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon
import videosinfantis
import ogatodasbotas
import canalpanda
import kidsyoutube
addon_id = 'plugin.video.videosinfantis'
selfAddon = xbmcaddon.Addon(id=addon_id)
addonfolder = selfAddon.getAddonInfo('path')
artfolder = '/resources/img/'
siteurl = 'http://videosinfantis.pt/'
siteurl2 = 'http://videos.ogatodasbotas.com/'
siteurl3 = 'http://canalpanda.pt/'
##################################################
#MENUS
def CATEGORIES():
kidsyoutube.CATEGORIESyou()
# kidsyoutube.addDir('Kids Youtube',siteurl,13,addonfolder+artfolder+'iconKyou.png',True,1,'',maxresults,startindex,'')
# videosinfantis.addDir('Videos Infantis',siteurl,4,addonfolder+artfolder+'iconVI.png',True)
# ogatodasbotas.addDir('O Gato das Botas',siteurl2,5,addonfolder+artfolder+'icongato.png',True)
# canalpanda.add | Dir('CanalPanda.pt',siteur | l3,12,addonfolder+artfolder+'iconpanda.png')
##################################################
#FUNCOES
def play(url,name):
if 'gatodasbotas' in url: url=ogatodasbotas.encontrar_tipo_da_fonte(url)
listitem = xbmcgui.ListItem()
listitem.setPath(url)
listitem.setInfo("Video", {"Title":name})
listitem.setProperty('IsPlayable', 'true')
try:
xbmcPlayer = xbmc.Player(xbmc.PLAYER_CORE_AUTO)
xbmcPlayer.play(url)
except:
pass
self.message("Couldn't play item.")
##############################
#GET PARAMS
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'): params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2: param[splitparams[0]]=splitparams[1]
return param
params=get_params()
url=None
name=None
mode=None
iconimage=None
maxresults=None
startindex=None
try: url=urllib.unquote_plus(params["url"])
except: pass
try: name=urllib.unquote_plus(params["name"])
except: pass
try: mode=int(params["mode"])
except: pass
try: iconimage=urllib.unquote_plus(params["iconimage"])
except: pass
try: maxresults=int(params["maxresults"])
except: pass
try: startindex=int(params["startindex"])
except: pass
print "#Mode: "+str(mode)
print "#URL: "+str(url)
print "#Name: "+str(name)
print "#Iconimage: "+str(iconimage)
print "#MaxResults: "+str(maxresults)
print "#StartIndex: "+str(startindex)
#################################
#MODOS
if mode==None or url==None or len(url)<1: CATEGORIES()
elif mode==1: videosinfantis.listar_videos(url,siteurl)
elif mode==2: play(url,name)
elif mode==3: videosinfantis.pesquisa(siteurl)
elif mode==4: videosinfantis.CATEGORIESvi(siteurl)
elif mode==5: ogatodasbotas.CATEGORIESgato(siteurl2)
elif mode==6: ogatodasbotas.listar_videos(url)
elif mode==7: ogatodasbotas.listar_videos_category(url)
elif mode==8: canalpanda.programa_paginicial(url,'0',siteurl3)
elif mode==9: canalpanda.pesquisa(siteurl3)
elif mode==10:
filterorder=re.compile('filterorder=(.+?)&').findall(url)
filtervalue=re.compile('filtervalue=(.+?)&').findall(url)
if filterorder==[]: canalpanda.lista_de_videos(url,'',filtervalue[0],siteurl3)
else: canalpanda.lista_de_videos(url,filterorder[0],'',siteurl3)
elif mode==11: canalpanda.programa_paginicial(url,'1',siteurl3)
elif mode==12: canalpanda.CATEGORIESpanda(siteurl3)
elif mode==13: kidsyoutube.CATEGORIESyou()
elif mode==14: kidsyoutube.listchannel(name,url,maxresults,startindex)
elif mode==15: kidsyoutube.playlistchannel(name,url,maxresults,startindex)
elif mode==16: kidsyoutube.MenuCreate(name,url,maxresults,startindex)
elif mode==17: kidsyoutube.playlistListing(name,url,maxresults,startindex)
xbmcplugin.endOfDirectory(int(sys.argv[1])) |
toboso-team/toledo | toledo/__init__.py | Python | mit | 97 | 0 | from . import graphics
from . | import input
from . import util
from .controll | er import Controller
|
dougthor42/CodeSort | codesort/find_fold_points.py | Python | mit | 2,335 | 0 | # -*- coding: utf-8 -*-
"""
@name: find_fold_points.py
@vers: 0.1
@author: Douglas Thor
@created: Sun Jun 29 17:03:12 2014
@modified: Sun Jun 29 17:03:12 2014
@descr: Returns the fold points - where code gets indented and
dedented - of a .py file.
"""
from __future__ import print_function, division
import os.path
import tokenize
from StringIO import StringIO
def find_fold_points(block):
"""
Returns a list of (start_row, end_row, indent) tuples that denote fold
locations. Basically anywhere that there's an indent.
"""
token_whitelist = (tokenize.NL,
tokenize.NEWLINE,
tokenize.INDENT,
tokenize.DEDENT,
tokenize.COMMENT,
)
# temporary code that allows for running a block or a full file
if os.path.isfile(block):
with open(block) as open_file:
token_block = tokenize.generate_tokens(open_file)
else:
token_block = tokenize.generate_tokens(StringIO(block).readline)
indent_level = 0
nl_counter = 0
comment_counter = 0
indents = []
result = []
for toknum, _, srowcol, _, _ in token_block:
# Account for comments at the start of a block and newlines at the
# end of a block.
if toknum == tokenize.NL:
nl_counter += 1
if toknum == tokenize.COMMENT:
comment_counter += 1
if toknum == tokenize.INDENT:
indent_level += 1
indents.append(srowcol[0] - 1 - comment_counter)
if toknum == tokenize.DEDENT:
# the next DEDENT belongs to the most recent INDENT, so w | e pop off
# th | e last indent from the stack
indent_level -= 1
matched_indent = indents.pop()
result.append((matched_indent,
srowcol[0] - 1 - nl_counter,
indent_level + 1))
if toknum not in token_whitelist:
nl_counter = 0
comment_counter = 0
if len(indents) != 0:
raise ValueError("Number of DEDENTs does not match number of INDENTs.")
return result
if __name__ == "__main__":
pass
|
jmluy/xpython | exercises/practice/knapsack/knapsack.py | Python | mit | 51 | 0 | def maximum | _va | lue(maximum_weight, items):
pass
|
pipermerriam/web3.py | web3/utils/blocks.py | Python | mit | 1,270 | 0.001575 | from eth_utils import (
is_hex,
is_string,
is_integer,
remove_0x_prefix,
force_text,
)
def is_predefined_block_number(value):
if not is_string(value):
return False
return force_text(value) in {"latest", "pending", "earliest"}
def is_hex_encoded_block_hash(value):
if not is_string(value):
return False
return len(remove_0x_prefix(value)) == 64 and is_hex(value)
def is_hex_encoded_block_number(value):
if not is_string(value):
return False
elif is_hex_encoded_b | lock_hash(value):
return False
try:
value_as_int = int(value, 16)
except ValueError:
return False
return 0 <= value_as_int < 2**256
def select_method_for_block_identifier(value, if_hash, if_number, if_predefined):
if is_predefined_block_number(value):
return if_predefined
elif isinstance(value, bytes):
return if_hash
elif is_hex_encoded_block_hash(value):
return if_hash
elif is_integer(value) an | d (0 <= value < 2**256):
return if_number
elif is_hex_encoded_block_number(value):
return if_number
else:
raise ValueError(
"Value did not match any of the recognized block identifiers: {0}".format(value)
)
|
wanghe4096/website | aliyun/api/rest/Ecs20140526DeleteSnapshotRequest.py | Python | bsd-2-clause | 334 | 0.026946 | '''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Ecs20140526DeleteSnapshotRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port) |
self.SnapshotId = None
def getapiname(self):
return 'ecs.aliyuncs.com.DeleteSnapshot.2014-0 | 5-26'
|
tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Scaling/results/cross_validate_objects_BMED_8813_HAP_scaled_method_I.py | Python | mit | 3,959 | 0.017934 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data_method_I import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-M | ethod used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C | = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:12]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
|
titiwu/simpl | simpl/diagnose.py | Python | gpl-3.0 | 1,732 | 0 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 19 21:03:22 2017
Stolen from jasper
@author: mb
"""
import sys
import socket
import logging
if sys.version_info < (3, 3):
from distutils.spawn import find_executable
else:
from shutil import which as find_executable
def check_network_connection(server="www. | google.com"):
"""
Checks if jasper can connect a network server.
Arguments:
server -- (optional) the server to connect with (Default:
| "www.google.com")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking network connection to server '%s'...", server)
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(server)
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection((host, 80), 2)
except Exception:
logger.debug("Network connection not working")
return False
else:
logger.debug("Network connection working")
return True
def check_executable(executable):
"""
Checks if an executable exists in $PATH.
Arguments:
executable -- the name of the executable (e.g. "echo")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking executable '%s'...", executable)
executable_path = find_executable(executable)
found = executable_path is not None
if found:
logger.debug("Executable '%s' found: '%s'", executable,
executable_path)
else:
logger.debug("Executable '%s' not found", executable)
return found
|
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/extern/pygments/styles/borland.py | Python | mit | 1,613 | 0 | # -*- coding: utf-8 -*-
"""
pygments.styles.borland
~~~~~~~~~~~~~~~~~~~~~~~
Style similar to the style used in the Borland IDEs.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class BorlandStyle(Style):
"""
Style similar to the style used in the borland IDEs.
"""
default_style = ''
s | tyles = {
Whitespace: '#bbbbbb',
Comment: 'italic #008800',
Comment.Preproc: 'noitalic #008080',
Comment.Special: 'noitalic bold',
String: '#0000FF',
String.Char: '#800080',
Number: '#0000FF',
Keyword: 'bold #000080',
Operator.Word: 'bold',
Name.Tag: 'bo | ld #000080',
Name.Attribute: '#FF0000',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
|
SphinxKnight/kuma | kuma/core/views.py | Python | mpl-2.0 | 1,861 | 0.001612 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.vie | ws.decorators.http import require_POST
from .i18n import get_kuma_languages
@n | ever_cache
def _error_page(request, status):
"""
Render error pages with jinja2.
Sometimes, an error is raised by a middleware, and the request is not
fully populated with a user or language code. Add in good defaults.
"""
if not hasattr(request, 'user'):
request.user = AnonymousUser()
if not hasattr(request, 'LANGUAGE_CODE'):
request.LANGUAGE_CODE = 'en-US'
return render(request, '%d.html' % status, status=status)
@never_cache
@csrf_exempt
@require_POST
def set_language(request):
lang_code = request.POST.get("language")
response = HttpResponse(status=204)
if lang_code and lang_code in get_kuma_languages():
response.set_cookie(key=settings.LANGUAGE_COOKIE_NAME,
value=lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
handler403 = lambda request, exception=None: _error_page(request, 403)
handler404 = lambda request, exception=None: _error_page(request, 404)
handler500 = lambda request, exception=None: _error_page(request, 500)
@never_cache
def rate_limited(request, exception):
"""Render a rate-limited exception."""
response = render(request, '429.html', status=429)
response['Retry-After'] = '60'
return response
|
charany1/Bookie | dbversions/versions/11087341e403_add_private_bookmark_support_to_bmarks_.py | Python | agpl-3.0 | 906 | 0.003311 | """add private bookmark support to bmarks table
Revision ID: 11087341e403
Revises: 44dccb7b8b82
Create Date: 2014-05-23 07:18:38.743431
"""
# revision identifiers, used by Alembic.
revision = '11087341e403'
down_revision = '44dccb7b8b82'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('bmarks', sa.Column('is_private', sa.Boolean(), nullable=False, server_default=sa.sql.expression.true()))
# Update the existing bookmarks to be public.
connection = op.get_bind()
current_ | context = op.get_context()
meta = current_context.opts['target_metadata']
bmarks = sa.Table('bmarks', meta, autoload=True)
sel = sa.select([bmarks])
stmt = bmarks.update().\
values(is_private=False)
connection.execute(stmt)
def downgrade():
try:
| op.drop_column('bmarks', 'is_private')
except sa.exc.OperationalError as exc:
pass
|
t3dev/odoo | addons/account/tests/test_account_fiscal_year.py | Python | gpl-3.0 | 4,107 | 0.001217 | # -*- coding: utf-8 -*-
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
import odoo.tests
from odoo import fields
from datetime import datetime
@odoo.tests.tagged('post_install', '-at_install')
class TestFiscalPosition(AccountingTestCase):
def check_compute_fiscal_year(self, company, date, expected_date_from, expected_date_to):
'''Compute the fiscal year at a certain date for the company passed as parameter.
Then, check if the result matches the 'expected_date_from'/'expected_date_to' dates.
:param company: The company.
:param date: The date belonging to the fiscal year.
:param expected_date_from: The expected date_from after computation.
:param expected_date_to: The expected date_to after computation.
'''
current_date = fields.Date.from_string(date)
res = company.compute_fiscalyear_dates(current_date)
self.assertEqual(res['date_from'], fields.Date.from_string(expected_date_from))
self.assertEqual(res['date_to'], fields.Date.from_string(expected_date_to))
def test_default_fiscal_year(self):
'''Basic case with a fiscal year xxxx-01-01 - xxxx-12-31.'''
company = self.env.ref('base.main_company')
company.fiscalyear_last_day = 31
company.fiscalyear_last_month = '12'
self.check_compute_fiscal_year(
company,
'2017-12-31',
'2017-01-01',
'2017-12-31',
)
self.check_compute_fiscal_year(
company,
'2017-01-01',
'2017-01-01',
'2017-12-31',
)
def test_leap_fiscal_year_1(self):
'''Case with a leap year ending the 29 February.'''
company = self.env.ref('base.main_company')
company.fiscalyear_last_day = 29
company.fiscalyear_last_month = '2'
self.check_compute_fiscal_year(
company,
'2016-02-29',
'2015-03-01',
'2016-02-29',
)
self.check_compute_fiscal_year(
company,
'2015-03-01',
'2015-03-01',
'2016-02-29',
)
def test_leap_fiscal_year_2(self):
'''Case with a leap year ending the 28 February.'''
company = self.env.ref('base.main_company')
company.fiscalyear_last_day = 28
company.fiscalyear_last_month = '2'
self.check_compute_fiscal_year(
company,
'2016-02-29',
'2015-03-01',
'2016-02-29',
)
self.check_compute_fiscal_year(
company,
'2016-03-01',
'2016-03-01',
'2017-02-28',
)
def test_custom_fiscal_year(self):
'''Case with custom fiscal years.'''
company = self.env.ref('base.main_company')
company.fiscalyear_last_day = 31
company.fiscalyear_last_month = '12'
# Create custom fiscal year covering the 6 first months of 2017.
self.env['account.fiscal.year'].create({
'name': '6 month 2017',
'date_from': '2017-01-01',
'date_to': '2017-05-31',
'company_id': company.id,
})
# Check before the custom fiscal year).
self.check_compute_fiscal_year(
company,
'2017-02-01',
'2017-01-01',
'2017-05-31',
)
# Check after the custom fiscal year.
self.check_compute_fiscal_year(
| company,
'2017-11-01',
'2017-06-01',
'2017-12-31',
)
# Create custom fiscal year covering the 3 last months of 2017.
self.env['account.fiscal.year'].create({
'name': 'last 3 month 2017',
'date_from': '2017-10-01',
'date_to': '2017-12-31',
'company_id': company.id, |
})
# Check inside the custom fiscal years.
self.check_compute_fiscal_year(
company,
'2017-07-01',
'2017-06-01',
'2017-09-30',
)
|
vejmelkam/wrfxpy | src/ingest/level0_source.py | Python | mit | 36,889 | 0.006154 | #
# Dalton Burke, CU Denver
#
# CONUS = [-124.7844079,-66.9513812,24.7433195,49.3457868]
from __future__ import absolute_import
from utils import ensure_dir, symlink_unless_exists
from .downloader import download_url, DownloadError, get_dList
# fast searching of dList
from bisect import bisect
from datetime import datetime, timedelta
from pyhdf import SD
import pytz
import requests
import os
import os.path as osp
import sys
import logging
from six.moves import map
from six.moves import range
class data_sourceError(Exception):
"""
Raised when a level0 source cannot retreive files
"""
pass
class data_source(object):
"""
Parent class of all data sources that implement common functionality, for example
- local validation (file size check)
- HDF retrieval with retries (smart check whether server implements http-range)
"""
def __init__(self, ingest_dir):
"""
Initialize level0 source with ingest directory (where level0 files are stored).
:param ingest_dir: root of level0 storage
"""
self.ingest_dir = osp.abspath(osp.expanduser(ingest_dir))
def retrieve_data(self, from_utc, to_utc, lonlat):
"""
Retrieves all data (geo and active fire) in the given time range and longitude/latitude box. This function is what end users will use to get data
:param from_utc: start time
:param to_utc: end time
:param lonlat: list of form [lowlon, highlon, lowlat, highlat] describing longitude/latitude box
:return: list of paths to local files that were retrieved
"""
# I think all data should be ingested into one directory, then whichever files
# are needed for a given job can be copied to a new folder with a job name
two_weeks_ago = datetime.utcnow() - timedelta(days=14)
manifest = []
if from_utc > two_weeks_ago:
manifest.extend(self.retrieve_l0(from_utc, to_utc))
elif to_utc < two_weeks_ago:
# filter geo_list on intersection with lonlat, the hdf library i'd want to use here isn't ready yet
geo_list = [x for x in self.retrieve_geo(from_utc, to_utc) if geo_intersects(self.ingest_dir + '/' + x, lonlat)]
# geo_list = retrieve_geo(from_utc, to_utc)
manifest.extend(geo_list)
manifest.extend(self.retrieve_af(geo_list))
else:
manifest.extend(self.retrieve_l0(two_weeks_ago + timedelta(minutes=10), to_utc))
# filter geo_list on intersection with lonlat
geo_list = [x for x in self.retrieve_geo(from_utc, two_weeks_ago) if geo_intersect(self.ingest_dir + '/' + x, lonlat)]
# geo_list = retrieve_geo(from_utc, two_weeks_ago)
manifest.extend(geo_list)
manifest.extend(self.retrieve_af(geo_list))
return manifest
def retrieve_geo(self, from_utc, to_utc, ref_utc = None):
"""
| Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
: | param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
pass
def compute_geo_manifest(from_utc, to_utc):
"""
Get list of geolocation file names for the given time frame
:param from_utc: start time UTC
:param to_utc: end time UTC
:return: list of file names as strings
"""
pass
def retrieve_af(self, geo_list):
"""
Attempts to retrieve active fire files in the time range and latitude/longitude box
:param geo_list: list containing the relevant geolocation file names
:return: a list of paths to the local active fire files
"""
pass
def compute_af_manifest(geo_list):
"""
get list of active fire file names from a set of geolocation files
:param geo_list: list containing geolocation file names
"""
pass
def retrieve_l0(self, from_utc, to_utc, ref_utc = None):
"""
Attempts to retrieve the firedata files for the time range.
It should be first verified whether the firedata files are available locally.
For any unavailable files, downloads should be initiated.
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local level0 files
"""
pass
def compute_l0_manifest(self, from_utc, to_utc):
"""
Compute list of files in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
pass
def manifest_from_geo(self, geo_list, granule_name):
# prefix later tells us what url we should be looking at
prefix = ''
file_list = []
# pulls directory listing of each relevant page (determined by the 'prefix' of each geo file)
# this yields a super set of the active fire files we care about, we'll refine the list in the next part
for g in geo_list:
if g[:19] != prefix:
prefix = g[:19]
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
# manifest contains the final set of exact filenames we care about
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, granule_name + g[5:24] + '99999999999999.hdf') - 1])
return manifest
def download_file(self, url_base, rel_path, max_retries=3):
"""
Download a file and stream to <rel_path> in ingest_dir.
:param url_base: the base URL where the file is hosted
:param rel_path: the relative path of the file
:param max_retries: how many times we may retry to download the file
"""
url = url_base + '/' + rel_path
path = osp.join(self.ingest_dir, rel_path)
try:
# print 'downloading', url
download_url(url, path, max_retries)
# print 'done'
except DownloadError as e:
raise data_sourceError('data_source: failed to download file %s' % url)
def available_locally(self, path):
"""
Check in a level0 file is available locally and if it's file size checks out.
:param path: the level0 file path
"""
info_path = path + '.size'
if osp.exists(path) and osp.exists(info_path):
content_size = int(open(info_path).read())
return osp.getsize(path) == content_size
else:
return False
class MODIS_TERRA(data_source):
"""
750m data from the MODIS instrument on the Terra satellite
"""
def __init__(self, ingest_dir):
# if(satellite = 'terra'):
# self.geo_gran = 'MOD03'
# self.af_gran = 'MOD14'
# elif(satellite = 'aqua'):
# self.geo_gran = 'MYD03'
# self.af_gran = 'MYD14'
# else:
# raise Exception(ValueError)
super(MODIS_TERRA, self).__init__(ingest_dir)
def retrieve_geo(self, from_utc, to_utc):
"""
Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
manifest = self.compute_geo_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving geolocation data from %s' % (self.url_base_hdf + '/' + self.filepath_geo))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_geo + '/' + x[7:11] |
mekanix/flask-bootstrap-sql-rest | freenit/schemas/base.py | Python | gpl-3.0 | 236 | 0 | from marshmallow | import EXCLUDE, Schema
from ..fields.objectid import ID
class BaseSchema(Schema):
id = ID(description='ID', dump_only=True)
class Meta:
strict = True
ordered = True
| unknown = EXCLUDE
|
pytest-dev/pytest-qt | src/pytestqt/qt_compat.py | Python | mit | 6,366 | 0.001414 | """
Provide a common way to import Qt classes used by pytest-qt in a unique manner,
abstracting API differences between PyQt5 and PySide2/6.
.. note:: This module is not part of pytest-qt public API, hence its interface
may change between releases and users should not rely on it.
Based on from https://github.com/epage/PythonUtils.
"""
from collections import namedtuple
import os
import pytest
VersionTuple = namedtuple("VersionTuple", "qt_api, qt_api_version, runtime, compiled")
def _import(name):
"""Think call so we can mock it during testing"""
return __import__(name)
class _QtApi:
"""
Interface to the underlying Qt API currently configured for pytest-qt.
This object lazily loads all class references and other objects when the ``set_qt_api`` method
gets called, providing a uniform way to access the Qt classes.
"""
def __init__(self):
self._import_errors = {}
def _get_qt_api_from_env(self):
api = os.environ.get("PYTEST_QT_API")
supported_apis = [
"pyside6",
"pyside2",
"pyqt6",
"pyqt5",
]
if api is not None:
api = api.lower()
if api not in supported_apis: # pragma: no cover
msg = f"Invalid value for $PYTEST_QT_API: {api}, expected one of {supported_apis}"
raise pytest.UsageError(msg)
return api
def _guess_qt_api(self): # pragma: no cover
def _can_import(name):
try:
_import(name)
return True
except ModuleNotFoundError as e:
self._import_errors[name] = str(e)
return False
# Note, not importing only the root namespace because when uninstalling from conda,
# the namespace can still be there.
if _can_import("PySide6.QtCore"):
return "pyside6"
elif _can_import("PySide2.QtCore"):
return "pyside2"
elif _can_import("PyQt6.QtCore"):
return "pyqt6"
elif _can_import("PyQt5.QtCore"):
return "pyqt5"
return None
def set_qt_api(self, api):
self.pytest_qt_api = self._get_qt_api_from_env() or api or self._guess_qt_api()
self.is_pyside = self.pytest_qt_api in ["pyside2", "pyside6"]
self.is_pyqt = self.pytest_qt_api in ["pyqt5", "pyqt6"]
if not self.pytest_qt_api: # pragma: no cover
erro | rs = "\n".join(
f" {module}: {reason}"
for module, reason in sorted(self._import_errors.items())
)
msg = (
"pytest-qt requires either | PySide2, PySide6, PyQt5 or PyQt6 installed.\n"
+ errors
)
raise pytest.UsageError(msg)
_root_modules = {
"pyside6": "PySide6",
"pyside2": "PySide2",
"pyqt6": "PyQt6",
"pyqt5": "PyQt5",
}
_root_module = _root_modules[self.pytest_qt_api]
def _import_module(module_name):
m = __import__(_root_module, globals(), locals(), [module_name], 0)
return getattr(m, module_name)
self.QtCore = QtCore = _import_module("QtCore")
self.QtGui = _import_module("QtGui")
self.QtTest = _import_module("QtTest")
self.QtWidgets = _import_module("QtWidgets")
self._check_qt_api_version()
# qInfo is not exposed in PySide2/6 (#232)
if hasattr(QtCore, "QMessageLogger"):
self.qInfo = lambda msg: QtCore.QMessageLogger().info(msg)
elif hasattr(QtCore, "qInfo"):
self.qInfo = QtCore.qInfo
else:
self.qInfo = None
self.qDebug = QtCore.qDebug
self.qWarning = QtCore.qWarning
self.qCritical = QtCore.qCritical
self.qFatal = QtCore.qFatal
if self.is_pyside:
self.Signal = QtCore.Signal
self.Slot = QtCore.Slot
self.Property = QtCore.Property
elif self.is_pyqt:
self.Signal = QtCore.pyqtSignal
self.Slot = QtCore.pyqtSlot
self.Property = QtCore.pyqtProperty
else:
assert False, "Expected either is_pyqt or is_pyside"
def _check_qt_api_version(self):
if not self.is_pyqt:
# We support all PySide versions
return
if self.QtCore.PYQT_VERSION == 0x060000: # 6.0.0
raise pytest.UsageError(
"PyQt 6.0 is not supported by pytest-qt, use 6.1+ instead."
)
elif self.QtCore.PYQT_VERSION < 0x050B00: # 5.11.0
raise pytest.UsageError(
"PyQt < 5.11 is not supported by pytest-qt, use 5.11+ instead."
)
def exec(self, obj, *args, **kwargs):
# exec was a keyword in Python 2, so PySide2 (and also PySide6 6.0)
# name the corresponding method "exec_" instead.
#
# The old _exec() alias is removed in PyQt6 and also deprecated as of
# PySide 6.1:
# https://codereview.qt-project.org/c/pyside/pyside-setup/+/342095
if hasattr(obj, "exec"):
return obj.exec(*args, **kwargs)
return obj.exec_(*args, **kwargs)
def get_versions(self):
if self.pytest_qt_api == "pyside6":
import PySide6
version = PySide6.__version__
return VersionTuple(
"PySide6", version, self.QtCore.qVersion(), self.QtCore.__version__
)
elif self.pytest_qt_api == "pyside2":
import PySide2
version = PySide2.__version__
return VersionTuple(
"PySide2", version, self.QtCore.qVersion(), self.QtCore.__version__
)
elif self.pytest_qt_api == "pyqt6":
return VersionTuple(
"PyQt6",
self.QtCore.PYQT_VERSION_STR,
self.QtCore.qVersion(),
self.QtCore.QT_VERSION_STR,
)
elif self.pytest_qt_api == "pyqt5":
return VersionTuple(
"PyQt5",
self.QtCore.PYQT_VERSION_STR,
self.QtCore.qVersion(),
self.QtCore.QT_VERSION_STR,
)
assert False, f"Internal error, unknown pytest_qt_api: {self.pytest_qt_api}"
qt_api = _QtApi()
|
eadgarchen/tensorflow | tensorflow/python/layers/core_test.py | Python | apache-2.0 | 20,438 | 0.007877 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
  @test_util.run_in_graph_and_eager_modes()
  def testDenseProperties(self):
    """Constructor args are exposed as attributes; unnamed layers auto-name sequentially."""
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    self.assertEqual(dense.units, 2)
    self.assertEqual(dense.activation, nn_ops.relu)
    self.assertEqual(dense.kernel_regularizer, None)
    self.assertEqual(dense.bias_regularizer, None)
    self.assertEqual(dense.activity_regularizer, None)
    self.assertEqual(dense.use_bias, True)

    # Test auto-naming
    # NOTE: the dense_1/dense_2 suffixes depend on layer creation order, so
    # the two apply() calls below must stay in this exact sequence.
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_1')
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_2')
  def testVariableInput(self):
    """A tf.Variable can be passed directly as layer input; zero input gives zero output."""
    with self.test_session():
      v = variable_scope.get_variable(
          'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
      x = core_layers.Dense(1)(v)
      variables.global_variables_initializer().run()
      self.assertAllEqual(x.eval(), [[0.0]])
  @test_util.run_in_graph_and_eager_modes()
  def testCall(self):
    """Calling a Dense layer creates kernel+bias, tracks them, and shapes the output."""
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 2], outputs.get_shape().as_list())
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables, [])
    if context.in_graph_mode():
      # Graph mode additionally registers the variables in the global collection.
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias.name, 'my_dense/bias:0')
  @test_util.run_in_graph_and_eager_modes()
  def testCallTensorDot(self):
    """Rank-3 input is contracted over the last axis (tensordot semantics)."""
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
  @test_util.run_in_graph_and_eager_modes()
  def testNoBias(self):
    """With use_bias=False only the kernel variable is created and tracked."""
    dense = core_layers.Dense(2, use_bias=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel])
    self.assertListEqual(dense.trainable_variables, [dense.kernel])
    self.assertListEqual(dense.non_trainable_variables, [])
    if context.in_graph_mode():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias, None)
  @test_util.run_in_graph_and_eager_modes()
  def testNonTrainable(self):
    """trainable=False moves kernel and bias to non_trainable_variables."""
    dense = core_layers.Dense(2, trainable=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables, [])
    if context.in_graph_mode():
      # Nothing should land in the trainable-variables collection.
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
  @test_util.run_in_graph_and_eager_modes()
  def testOutputShape(self):
    """Output keeps all leading dims and replaces the last dim with `units`."""
    dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 7])

    inputs = random_ops.random_uniform((5, 2, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])

    inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
  def testCallOnPlaceHolder(self):
    """Dense raises ValueError unless the last input dimension is statically known."""
    inputs = array_ops.placeholder(dtype=dtypes.float32)
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    inputs = array_ops.placeholder(
        dtype=dtypes.float32, shape=[None, None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)

    # Last dimension known: building succeeds even with unknown batch dims.
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)

    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)
  @test_util.run_in_graph_and_eager_modes()
  def testActivation(self):
    """In graph mode the output op name reflects the activation (or BiasAdd if none)."""
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if context.in_graph_mode():
      self.assertEqual(outputs.op.name, 'dense1/Relu')

    dense = core_layers.Dense(2, name='dense2')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if context.in_graph_mode():
      self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
  def testActivityRegularizer(self):
    """An activity regularizer adds exactly one loss, tracked on the layer too."""
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', activity_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)
  def testKernelRegularizer(self):
    """A kernel regularizer adds exactly one loss, tracked on the layer too."""
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', kernel_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATI |
yaoguai/sanzang-utils | setup.py | Python | mit | 2,044 | 0 | #!/usr/bin/env python3
""" Sanzang Utils setup script for packaging and installation. """
from distutils.core import setup
# Long description comes straight from the README so PyPI and the repo agree.
with open('README.rst', 'r', encoding='utf-8') as fin:
    LONG_DESCRIPTION = fin.read()

setup(
    #
    # Basic information
    #
    name='sanzang-utils',
    version='1.3.3',
    author='yaoguai',
    author_email='lapislazulitexts@gmail.com',
    url='https://github.com/yaoguai/sanzang-utils',
    license='MIT',
    #
    # Descriptions & classifiers
    #
    description='Machine Translation from Chinese, Japanese, or Korean.',
    long_description=LONG_DESCRIPTION,
    keywords='chinese japanese korean cjk asia language machine translation',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Religion',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Utilities'],
    #
    # Included Python files
    #
    scripts=[
        'szu-ed',
        'szu-r',
        'szu-ss',
        'szu-t'],
    py_modules=[
        'szu_ed',
        'szu_r',
        'szu_ss',
        'szu_t'],
    data_files=[
        ('share/doc/sanzang-utils', [
            'AUTHORS.rst',
            'LICENSE.rst',
            'NEWS.rst',
            'README.rst',
            'TUTORIAL.html']),
        ('share/man/man1', [
            'szu-ed.1',
            'szu-r.1',
            'szu-ss.1',
            'szu-t.1'])]
)
|
rogerthat-platform/rogerthat-backend | src/rogerthat/api/location.py | Python | apache-2.0 | 2,005 | 0.002494 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from mcfw.rpc import returns, arguments
from rogerthat.rpc.rpc import expose
from rogerthat.to.location import GetFriendLocationRequestTO, GetFriendLocationResponseTO, \
    GetFriendsLocationResponseTO, GetFriendsLocationRequestTO, GetLocationRequestTO
from rogerthat.utils.app import create_app_user, get_app_id_from_app_user
@expose(('api',))
@returns(GetFriendLocationResponseTO)
@arguments(request=GetFriendLocationRequestTO)
def get_friend_location(request):
    """Request that one friend's location be pushed to this user's mobile.

    The location itself is delivered asynchronously; the synchronous response
    intentionally carries no location payload.
    """
    from rogerthat.rpc import users
    from rogerthat.bizz.location import get_friend_location as bizz_get_friend_location
    user = users.get_current_user()
    bizz_get_friend_location(user, create_app_user(users.User(request.friend), get_app_id_from_app_user(user)),
                             target=GetLocationRequestTO.TARGET_MOBILE)
    response = GetFriendLocationResponseTO()
    response.location = None  # for backwards compatibility reasons
    return response
@expose(('api',))
@returns(GetFriendsLocationResponseTO)
@arguments(request=GetFriendsLocationRequestTO)
def get_friend_locations(request):
    """Return the last known locations of all friends of the current user."""
    from rogerthat.rpc import users
    from rogerthat.bizz.location import get_friend_locations as bizz_get_friend_locations
    locations = bizz_get_friend_locations(users.get_current_user())
    response = GetFriendsLocationResponseTO()
    response.locations = locations
    return response
|
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_spline_description2118_all_of.py | Python | mit | 5,381 | 0 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTSplineDescription2118AllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-valued or validated fields on this model.
    allowed_values = {}

    validations = {}

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "control_points": ([float],),  # noqa: E501
            "degree": (int,),  # noqa: E501
            "is_periodic": (bool,),  # noqa: E501
            "is_rational": (bool,),  # noqa: E501
            "knots": ([float],),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This schema has no discriminator field.
        return None

    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "control_points": "controlPoints",  # noqa: E501
        "degree": "degree",  # noqa: E501
        "is_periodic": "isPeriodic",  # noqa: E501
        "is_rational": "isRational",  # noqa: E501
        "knots": "knots",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        return None

    # Instance attributes managed by the model machinery itself (never
    # treated as OpenAPI data properties).
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_spline_description2118_all_of.BTSplineDescription2118AllOf - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            control_points ([float]): [optional]  # noqa: E501
            degree (int): [optional]  # noqa: E501
            is_periodic (bool): [optional]  # noqa: E501
            is_rational (bool): [optional]  # noqa: E501
            knots ([float]): [optional]  # noqa: E501
        """

        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
beiko-lab/gengis | bin/Lib/site-packages/pybioclim-20131009220535_53eb0b0-py2.7-win32.egg/pybioclim/__init__.py | Python | gpl-3.0 | 97 | 0 | from config import *
from get_values import *
from map_data import *
from read_data import *
|
jessamynsmith/eggtimer-server | selenium/test_signup.py | Python | mit | 6,622 | 0.000755 | # -*- coding: iso-8859-15 -*-
import datetime
import selenium_settings
from base_test import SeleniumBaseTest
class SignupTest(SeleniumBaseTest):
    """End-to-end browser tests for the signup / login / activation flow."""

    PASSWORD = 's3l3n1uM'
    PASSWORD2 = 'sel3n1uM2'
    PASSWORD3 = 'sel3n1uM3'

    def setUp(self):
        super(SignupTest, self).setUp()
        # Remove any leftover test user so signup starts from a clean slate.
        self.admin_login()
        self.delete_entities(self.admin_url + 'periods/user/',
                             'Select user to change',
                             selenium_settings.EMAIL_USERNAME.split('@')[0],
                             'Delete selected users')
        self.admin_logout()

        # Unique address per test run (plus-addressing with the run guid).
        self.USERNAME = selenium_settings.EMAIL_USERNAME.replace('@', '+%s@' % self.guid)
        self.NEW_USERNAME = self.USERNAME.replace('@', '1@')
        self.signup_url = self.base_url + 'accounts/signup/'
        self.login_url = self.base_url + 'accounts/login/'
        self.user_information = {
            'id_email': self.USERNAME,
        }
        self.signup_fields = self.user_information.copy()
        self.signup_fields.update({
            'id_password1': self.PASSWORD,
            'id_password2': self.PASSWORD,
        })
        self.organization_fields = {
            'id_organization_name': u'Selenium Organization \xe5',
            'id_job_title': u'Administrator \xe5',
        }

    def login(self, username, password):
        """Fill in and submit the sign-in form."""
        fields = {
            'id_login': username,
            'id_password': password,
        }
        self.browser.get(self.login_url)
        self.wait_for_load('Sign In')
        self.fill_fields(fields)
        self.submit_form()

    def logout(self):
        # For some reason clicking the menu fails
        self.browser.get(self.base_url + 'accounts/logout')
        self.wait_for_load('egg timer')

    def click_menu_item(self, menu_item_text):
        # The menu items are capitalized via CSS, so use .upper()
        self.click_element_by_link_text(menu_item_text.upper())

    def test_signup(self):
        """Signup validation, duplicate-email rejection, activation and login."""
        self.browser.get(self.login_url)
        self.wait_for_load('Sign In')
        self.click_element_by_link_text('sign up')
        self.wait_for_load('Sign Up')
        # Should fail if not filled in
        self.submit_form()
        self.assert_page_contains("This field is required.", 3)
        # Fill in fields and re-submit
        self.fill_fields(self.signup_fields)
        self.submit_form()
        title = datetime.datetime.now().strftime("%B %Y")
        self.wait_for_load(title)
        self.logout()
        self.wait_for_load('Sign In')
        # Try to sign up again with same info; should fail
        self.browser.get(self.signup_url)
        self.wait_for_load('Sign Up')
        self.fill_fields(self.signup_fields)
        self.submit_form()
        self.wait_for_load('Sign Up')
        self.assert_page_contains("A user is already registered with this e-mail address.")
        # Activate account
        self.activate_user(self.USERNAME)
        # Log in successfully
        self.login(self.USERNAME, self.PASSWORD)
        self.wait_for_load(title)
        # TODO Fix and enable tests
        # # Change password
        # self.click_menu_item(self.user_information['id_first_name'])
        # self.wait_for_load('aria-expanded="true"')
        # self.click_menu_item('Change Password')
        # self.wait_for_load('Change Password')
        # self.submit_form()
        # self.wait_for_load('This field is required.')
        # self.assert_page_contains("This field is required.", 3)
        # self.fill_fields({'id_oldpassword': 'bogusvalue'})
        # self.submit_form()
        # self.wait_for_load('Please type your current password.')
        # fields = {
        #     'id_oldpassword': self.PASSWORD,
        #     'id_password1': self.PASSWORD2,
        #     'id_password2': self.PASSWORD2,
        # }
        # self.fill_fields(fields)
        # self.submit_form()
        # self.wait_for_load('Change Password')
        # self.assert_page_contains("This field is required.", 0)
        #
        # # Test logout
        # self.logout()
        #
        # # Test reset password via email
        # self.click_menu_item('Sign In')
        # self.wait_for_load('Sign In')
        # self.click_element_by_link_text('Forgot Password?')
        # self.wait_for_load('Password Reset')
        # self.fill_fields_by_name({'email': 'bogus@example.com'})
        # self.submit_form()
        # self.wait_for_load("The e-mail address is not assigned to any user account")
        # self.fill_fields_by_name({'email': self.USERNAME})
        # self.submit_form()
        # self.wait_for_load('We have sent you an e-mail.')
        # # Retrieve and use email
        # email_text = self.retrieve_email(self.USERNAME, 'Password Reset E-mail')
        # reset_link = self.extract_link_from_email(email_text)
        # self.browser.get(reset_link)
        # self.wait_for_load('Change Password')
        # fields = {
        #     'id_password1': self.PASSWORD3,
        #     'id_password2': self.PASSWORD3,
        # }
        # self.fill_fields(fields)
        # self.submit_form()
        # self.assert_page_contains('Your password is now changed.')
        #
        # # Try to log in with old info
        # self.login(self.USERNAME, self.PASSWORD)
        # self.wait_for_load('The e-mail address and/or password you specified are not correct.')
        #
        # # Log in with updated info
        # self.login(self.USERNAME, self.PASSWORD3)
        # self.wait_for_load('Postings')
        #
        # # Update user information - no change
        # self.click_menu_item('Profile')
        # self.wait_for_load('aria-expanded="true"')
        # self.click_menu_item('Contact Info')
        # self.wait_for_load('Update Contact Information')
        # self.assert_fields(self.user_information)
        # self.submit_form()
        # self.wait_for_load('Postings')
        #
        # # Update user information
        # self.click_menu_item('Profile')
        # self.wait_for_load('aria-expanded="true"')
        # self.click_menu_item('Contact Info')
        # self.wait_for_load('Update Contact Information')
        # fields = {
        #     'id_email': selenium_settings.EMAIL_USERNAME,
        # }
        # self.fill_fields(fields)
        # self.submit_form()
        # self.wait_for_load(title)
        #
        # # Ensure updated information was saved
        # self.click_menu_item('Profile')
        # self.wait_for_load('aria-expanded="true"')
        # self.click_menu_item('Contact Info')
        # self.wait_for_load('Update Contact Information')
        # self.assert_fields(fields)
|
hankcs/HanLP | hanlp/components/mtl/tasks/ner/biaffine_ner.py | Python | apache-2.0 | 5,752 | 0.005216 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-05 01:49
import logging
from copy import copy
from typing import Dict, Any, Union, Iterable, List
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import VocabDict, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.components.ner.biaffine_ner.biaffine_ner import BiaffineNamedEntityRecognizer
from hanlp.components.ner.biaffine_ner.biaffine_ner_model import BiaffineNamedEntityRecognitionDecoder
from hanlp.datasets.ner.loaders.json_ner import unpack_ner
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp_common.util import merge_locals_kwargs
class BiaffineNamedEntityRecognition(Task, BiaffineNamedEntityRecognizer):
    def __init__(self, trn: str = None, dev: str = None, tst: str = None, sampler_builder: SamplerBuilder = None,
                 dependencies: str = None, scalar_mix: ScalarMixWithDropoutBuilder = None, use_raw_hidden_states=False,
                 lr=None, separate_optimizer=False,
                 doc_level_offset=True, is_flat_ner=True, tagset=None, ret_tokens=' ',
                 ffnn_size=150, loss_reduction='mean', **kwargs) -> None:
        """An implementation of Named Entity Recognition as Dependency Parsing (:cite:`yu-etal-2020-named`). It treats
        every possible span as a candidate of entity and predicts its entity label. Non-entity spans are assigned NULL
        label to be excluded. The label prediction is done with a biaffine layer (:cite:`dozat:17a`). As it makes no
        assumption about the spans, it naturally supports flat NER and nested NER.

        Args:
            trn: Path to training set.
            dev: Path to dev set.
            tst: Path to test set.
            sampler_builder: A builder which builds a sampler.
            dependencies: Its dependencies on other tasks.
            scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
            use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
            lr: Learning rate for this task.
            separate_optimizer: Use customized separate optimizer for this task.
            doc_level_offset: ``True`` to indicate the offsets in ``jsonlines`` are of document level.
            is_flat_ner: ``True`` for flat NER, otherwise nested NER.
            tagset: Optional tagset to prune entities outside of this tagset from datasets.
            ret_tokens: A delimiter between tokens in entities so that the surface form of an entity can be rebuilt.
            ffnn_size: Feedforward size for MLPs extracting the head/tail representations.
            loss_reduction: The loss reduction used in aggregating losses.
            **kwargs: Not used.
        """
        super().__init__(**merge_locals_kwargs(locals(), kwargs))
        self.vocabs = VocabDict()

    def update_metrics(self, batch: Dict[str, Any],
                       output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
                       prediction: Dict[str, Any], metric: Union[MetricDict, Metric]):
        BiaffineNamedEntityRecognizer.update_metrics(self, batch, prediction, metric)

    def decode_output(self,
                      output: Dict[str, Any],
                      mask: torch.BoolTensor,
                      batch: Dict[str, Any],
                      decoder,
                      **kwargs) -> Union[Dict[str, Any], Any]:
        return self.get_pred_ner(batch['token'], output['candidate_ner_scores'])

    def compute_loss(self, batch: Dict[str, Any],
                     output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion) -> \
            Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
        # Loss is already computed inside the decoder's forward pass.
        return output['loss']

    def build_dataloader(self, data,
                         transform: TransformList = None,
                         training=False,
                         device=None,
                         logger: logging.Logger = None,
                         gradient_accumulation=1,
                         **kwargs) -> DataLoader:
        transform = copy(transform)
        transform.append(unpack_ner)
        dataset = BiaffineNamedEntityRecognizer.build_dataset(self, data, self.vocabs, transform)
        if self.vocabs.mutable:
            BiaffineNamedEntityRecognizer.build_vocabs(self, dataset, logger, self.vocabs)
        return PadSequenceDataLoader(
            batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset), shuffle=training,
                                                     gradient_accumulation=gradient_accumulation),
            device=device,
            dataset=dataset)

    def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
        return BiaffineNamedEntityRecognitionDecoder(encoder_size, self.config.ffnn_size, len(self.vocabs.label),
                                                     self.config.loss_reduction)

    def build_metric(self, **kwargs):
        return BiaffineNamedEntityRecognizer.build_metric(self, **kwargs)

    def input_is_flat(self, data) -> bool:
        return BiaffineNamedEntityRecognizer.input_is_flat(data)

    def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> List:
        results = []
        BiaffineNamedEntityRecognizer.prediction_to_result(batch['token'], prediction, results,
                                                           ret_tokens=self.config.get('ret_tokens', ' '))
        return results
|
pedrox/meld | meld/ui/msgarea.py | Python | gpl-2.0 | 8,901 | 0.001573 | # This file is part of the Hotwire Shell user interface.
#
# Copyright (C) 2007,2008 Colin Walters <walters@verbum.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import logging
import gobject
import gtk
from .wraplabel import WrapLabel
_logger = logging.getLogger("hotwire.ui.MsgArea")
# This file is a Python translation of gedit/gedit/gedit-message-area.c
class MsgArea(gtk.HBox):
__gtype_name__ = "MsgArea"
__gsignals__ = {
"response" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,)),
"close" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [])
}
    def __init__(self, buttons, **kwargs):
        """Build the message area: content hbox on the left, button vbox on the right.

        buttons -- iterable of (button-text, response-id) pairs.
        """
        super(MsgArea, self).__init__(**kwargs)

        self.__contents = None
        self.__labels = []
        self.__changing_style = False  # re-entrancy guard for __on_style_set

        self.__main_hbox = gtk.HBox(False, 16)  # FIXME: use style properties
        self.__main_hbox.show()
        self.__main_hbox.set_border_width(8)  # FIXME: use style properties

        self.__action_area = gtk.VBox(True, 4);  # FIXME: use style properties
        self.__action_area.show()
        self.__main_hbox.pack_end (self.__action_area, False, True, 0)

        self.pack_start(self.__main_hbox, True, True, 0)

        # Paint our own tooltip-style background.
        self.set_app_paintable(True)
        self.connect("expose-event", self.__paint)

        # Note that we connect to style-set on one of the internal
        # widgets, not on the message area itself, since gtk does
        # not deliver any further style-set signals for a widget on
        # which the style has been forced with gtk_widget_set_style()
        self.__main_hbox.connect("style-set", self.__on_style_set)

        self.add_buttons(buttons)
def __get_response_data(self, w, create):
d = w.get_data('hotwire-msg-area-data')
if (d is None) and create:
d = {'respid': None}
w.set_data('hotwire-msg-area-data', d)
return d
def __find_button(self, respid):
children = self.__actionarea.get_children()
for child in children:
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
return child
    def __close(self):
        # Emulate dialog "close": emit a CANCEL response, but only when a
        # cancel button is actually present.
        cancel = self.__find_button(gtk.RESPONSE_CANCEL)
        if cancel is None:
            return

        # NOTE(review): response() is presumably defined further down the
        # class (outside this view) and emits the "response" signal — confirm.
        self.response(gtk.RESPONSE_CANCEL)
def __paint(self, w, event):
gtk.Style.paint_flat_box(w.style,
w.window,
gtk.STATE_NORMAL,
gtk.SHADOW_OUT,
None,
w,
"tooltip",
w.allocation.x + 1,
w.allocation.y + 1,
w.allocation.width - 2,
w.allocation.height - 2)
return False
def __on_style_set(self, w, style):
if self.__changing_style:
return
# This is a hack needed to use the tooltip background color
window = gtk.Window(gtk.WINDOW_POPUP);
window.set_name("gtk-tooltip")
window.ensure_style()
style = window.get_style()
self.__changing_style = True
self.set_style(style)
for label in self.__labels:
label.set_style(style)
self.__changing_style = False
window.destroy()
self.queue_draw()
def __get_response_for_widget(self, w):
rd = self.__get_response_data(w, False)
if rd is None:
return gtk.RESPONSE_NONE
return rd['respid']
def __on_action_widget_activated(self, w):
response_id = self.__get_response_for_widget(w)
self.response(response_id)
def add_action_widget(self, child, respid):
rd = self.__get_response_data(child, True)
rd['respid'] = respid
if not isinstance(child, gtk.Button):
raise ValueError("Can only pack buttons as action widgets")
child.connect('clicked', self.__on_action_widget_activated)
if respid != gtk.RESPONSE_HELP:
self.__action_area.pack_start(child, False, False, 0)
else:
self.__action_area.pack_end(child, False, False, 0)
def set_contents(self, contents):
self.__contents = contents
self.__main_hbox.pack_start(contents, True, True, 0)
def add_button(self, btext, respid):
button = gtk.Button(stock=btext)
button.set_focus_on_click(False)
button.set_flags(gtk.CAN_DEFAULT)
button.show()
self.add_action_widget(button, respid)
return button
def add_buttons(self, args):
_logger.debug("init buttons: %r", args)
for (btext, respid) in args:
self.add_button(btext, respid)
def set_response_sensitive(self, respid, setting):
for child in self.__action_area.get_children():
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
child.set_sensitive(setting)
break
def set_default_response(self, respid):
for child in self.__action_area.get_children():
rd = self.__get_response_data(child, False)
if rd is not None and rd['respid'] == respid:
child.grab_default()
break
def response(self, respid):
self.emit('response', respid)
def add_stock_button_with_text(self, text, stockid, respid):
b = gtk.Button(label=text)
b.set_focus_on_click(False)
img = gtk.Image()
img.set_from_stock(stockid, gtk.ICON_SIZE_BUTTON)
b.set_image(img)
b.show_all()
self.add_action_widget(b, respid)
return b
def set_text_and_icon(self, stockid, primary_text, secondary_text=None):
hbox_content = gtk.HBox(False, 8)
hbox_content.show()
image = gtk.Image()
image.set_from_stock(stockid, gtk.ICON_SIZE_DIALOG)
image.show()
hbox_content.pack_start(image, False, False, 0)
image.set_alignment(0.5, 0.5)
vbox = gtk.VBox(False, 6)
vbox.show()
hbox_content.pack_start (vbox, True, True, 0)
self.__labels = []
primary_markup = "<b>%s</b>" % (primary_text,)
primary_label = WrapLabel(primary_markup)
primary_label.show()
vbox.pack_start(primary_label, True, True, 0)
primary_label.set_use_markup(True)
primary_label.set_line_wrap(True)
primary_label.set_alignment(0, 0.5)
primary_label.set_flags(gtk.CAN_FOCUS)
primary_label.set_selectable(True)
self.__labels.append(primary_label)
if secondary_text:
secondary_markup = "<small>%s</small>" | % (secondary_text,)
secondary_label = WrapLabel(secondary_markup)
secondary_label.show()
vbox.pack_start(secondary_label | , True, True, 0)
secondary_label.set_flags(gtk.CAN_FOCUS)
secondary_label.set_use_markup(True)
secondary_label.set_line_wrap(True)
secondary_label.set_selectable(True)
secondary_label.set_alignment(0, 0.5)
self.__labels.append(secondary_label)
self.set_contents(hbox_content)
class MsgAreaController(gtk.HBox):
__gtype_name__ = "MsgAreaController"
def __init__(self):
super(MsgAreaController, self).__init__()
self.__ |
liuzz1983/open_vision | openvision/datasets/utils.py | Python | mit | 1,945 | 0.005656 | class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = | name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_pa | ths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(paths):
dataset = []
for path in paths.split(':'):
path_exp = os.path.expanduser(path)
classes = os.listdir(path_exp)
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
dataset.append(ImageClass(class_name, image_paths))
return dataset
def split_dataset(dataset, split_ratio, mode):
if mode=='SPLIT_CLASSES':
nrof_classes = len(dataset)
class_indices = np.arange(nrof_classes)
np.random.shuffle(class_indices)
split = int(round(nrof_classes*split_ratio))
train_set = [dataset[i] for i in class_indices[0:split]]
test_set = [dataset[i] for i in class_indices[split:-1]]
elif mode=='SPLIT_IMAGES':
train_set = []
test_set = []
min_nrof_images = 2
for cls in dataset:
paths = cls.image_paths
np.random.shuffle(paths)
split = int(round(len(paths)*split_ratio))
if split<min_nrof_images:
continue # Not enough images for test set. Skip class...
train_set.append(ImageClass(cls.name, paths[0:split]))
test_set.append(ImageClass(cls.name, paths[split:-1]))
else:
raise ValueError('Invalid train/test split mode "%s"' % mode)
return train_set, test_set
|
nuxly/nuxly-odoo-addons | nuxly/models/__init__.py | Python | agpl-3.0 | 106 | 0.009434 | from . impor | t hr_analytic_timesheet
from . import global_time | sheet_state
from . import crm_lead_suivi_gdoc |
ericdill/databroker | databroker/tests/test_v2/generic.py | Python | bsd-3-clause | 11,121 | 0.00054 | import collections
import event_model
import itertools
from bluesky.plans import count
from intake.catalog.utils import RemoteCatalogError
import numpy
import ophyd.sim
import os
import pytest
import time
import uuid
def normalize(gen):
"""
Converted any pages to singles.
"""
for name, doc in gen:
if name == 'event_page':
for event in event_model.unpack_event_page(doc):
yield 'event', event
elif name == 'datum_page':
for datum in event_model.unpack_datum_page(doc):
yield 'datum', datum
else:
yield name, doc
def compare(a, b):
a = normalize(a)
b = normalize(b)
a_indexed = {}
b_indexed = {}
for name, doc in a:
if name == 'resource':
# Check for an extraneous duplicate key in old documents.
if 'id' in doc:
assert doc['id'] == doc['uid']
doc = doc.copy()
doc.pop('id')
if name == 'datum':
a_indexed[('datum', doc['datum_id'])] = doc
# v0 yields {'_name": 'RunStop'} if the stop doc is missing; v2 yields None.
elif name == 'stop' and doc is None or 'uid' not in doc:
a_indexed[(name, None)] = None
else:
a_indexed[(name, doc['uid'])] = doc
for name, doc in b:
if name == 'resource':
# Check for an extraneous duplicate key in old documents.
if 'id' in doc:
assert doc['id'] == doc['uid']
doc = doc.copy()
doc.pop('id')
if name == 'datum':
b_indexed[('datum', doc['datum_id'])] = doc
# v0 yields {'_name": 'RunStop'} if the stop doc is missing; v2 yields None.
elif name == 'stop' and doc is None or 'uid' not in doc:
b_indexed[(name, None)] = None
else:
b_indexed[(name, doc['uid'])] = doc
# Same number of each type of document?
a_counter = collections.Counter(name for name, uid in a_indexed)
b_counter = collections.Counter(name for name, uid in b_indexed)
assert a_counter == b_counter
# Same uids and names?
assert set(a_indexed) == set(b_indexed)
# Now delve into the documents themselves...
for (name, unique_id), a_doc in a_indexed.items():
b_doc = b_indexed[name, unique_id]
# Handle special case if 'stop' is None.
if name == 'stop' and unique_id is None:
assert b_doc is None or 'uid' not in b_doc
continue
# Same top-level keys?
assert set(a_doc) == set(b_doc)
# Same contents?
try:
a_doc == b_doc
except ValueError:
# We end up here if, for example, the dict contains numpy arrays.
event_model.sanitize_doc(a_doc) == event_model.sanitize_doc(b_doc)
def test_fixture(bundle):
"Simply open the Catalog created by the fixture."
def test_search(bundle):
"Test search and progressive (nested) search with Mongo queries."
cat = bundle.cat
# Make sure the Catalog is nonempty.
assert list(cat['xyz']())
# Null search should return full Catalog.
assert list(cat['xyz']()) == list(cat['xyz'].search({}))
# Progressive (i.e. nested) search:
result = (cat['xyz']
.search({'plan_name': 'scan'})
.search({'time': {'$gt': 0}}))
assert bundle.uid in result
def test_repr(bundle):
"Test that custom repr (with run uid) appears and is one line only."
entry = bundle.cat['xyz']()[bundle.uid]
assert bundle.uid in repr(entry)
run = entry()
assert bundle.uid in repr(run)
assert len(repr(run).splitlines()) == 1
def test_repr_pretty(bundle):
"Test the IPython _repr_pretty_ has uid and also stream names."
formatters = pytest.importorskip("IPython.core.formatters")
f = formatters.PlainTextFormatter()
entry = bundle.cat['xyz']()[bundle.uid]
assert bundle.uid in f(entry)
# Stream names should be displayed.
assert 'primary' in f(entry)
run = entry()
assert bundle.uid in f(run)
assert 'primary' in f(run)
def test_iteration(bundle):
cat = bundle.cat['xyz']( | )
list(cat)
def test_len(bundle):
"""
Test that Catalog implements __len__.
Otherwise intake will loop it as `sum(1 for _ in catalog)` which is likely
less efficient.
"""
cat = bundle.cat['xyz']()
len(cat) # If not implemented, will raise TypeError
def test_getitem_sugar(bundle):
cat = bundle.cat['xyz']()
# Test lookup by recency (e.g. -1 is latest)
c | at[-1]
with pytest.raises((IndexError, RemoteCatalogError)):
cat[-(1 + len(cat))] # There aren't this many entries
# Test lookup by integer, not globally-unique, 'scan_id'.
expected = cat[bundle.uid]()
scan_id = expected.metadata['start']['scan_id']
actual = cat[scan_id]()
assert actual.metadata['start']['uid'] == expected.metadata['start']['uid']
with pytest.raises((KeyError, RemoteCatalogError)):
cat[234234234234234234] # This scan_id doesn't exit.
# Test lookup by partial uid.
expected = cat[bundle.uid]()
uid = bundle.uid
for j in itertools.count(8, len(uid)):
trunc_uid = uid[:j]
try:
int(trunc_uid)
except ValueError:
break
else:
continue
else:
raise pytest.skip(
"got an all int (!?) uid, can not truncate and retrieve "
"due to intake not respecting types in getitem across the network.")
actual = cat[trunc_uid]()
assert actual.metadata['start']['uid'] == expected.metadata['start']['uid']
def test_run_read_not_implemented(bundle):
"Test that custom repr (with run uid) appears."
run = bundle.cat['xyz']()[bundle.uid]
with pytest.raises(NotImplementedError):
run.read()
with pytest.raises(NotImplementedError):
run.to_dask()
def test_run_metadata(bundle):
"Find 'start' and 'stop' in the Entry metadata."
run = bundle.cat['xyz']()[bundle.uid]
for key in ('start', 'stop'):
assert key in run.metadata # entry
assert key in run().metadata # datasource
def test_read_canonical(bundle):
"This is a deprecated synonym for documents()."
run = bundle.cat['xyz']()[bundle.uid]
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
# Smoke test for back-compat alias
with pytest.warns(UserWarning):
next(run.read_canonical())
compare(run.documents(fill='yes'),
(filler(name, doc) for name, doc in bundle.docs))
def test_canonical(bundle):
"This is a deprecated synonym for documents()."
run = bundle.cat['xyz']()[bundle.uid]
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
# Smoke test for back-compat alias
with pytest.warns(UserWarning):
next(run.canonical(fill="yes"))
compare(run.documents(fill='yes'),
(filler(name, doc) for name, doc in bundle.docs))
def test_documents_unfilled(bundle):
run = bundle.cat['xyz']()[bundle.uid]
run.documents(fill='no')
compare(run.documents(fill='no'), bundle.docs)
# Passing the run through the filler to check resource and datum are
# received before corresponding event.
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
for name, doc in run.documents(fill='no'):
filler(name, doc)
def test_documents_delayed(bundle):
run = bundle.cat['xyz']()[bundle.uid]
filler = event_model.Filler({'NPY_SEQ': ophyd.sim.NumpySeqHandler},
inplace=False)
if bundle.remote:
with pytest.raises(NotImplementedError):
next(run.documents(fill='delayed'))
else:
compare(run.documents(fill='delayed'),
(filler(name, doc) for name, doc in bundle.docs))
def test_documents_duplicates(bundle):
run = bundle.cat['xyz']()[bundle.uid]
history = set()
run_start_uid = None
for name, |
eightnoteight/aschedule | tests/testing_ext.py | Python | mit | 2,983 | 0.002011 | # -*- coding: utf-8 -*-
from unittest.mock import patch
from datetime import timedelta
import unittest
import asyncio
import aschedule
class TestingExt(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
self.loop = asyncio.get_event_loop()
async def get_coro(self):
pass
def test_every_day(self):
self.every_patcher = patch('aschedule.ext.every')
self.addCleanup(self.every_patcher.stop)
self.every_mock = self.every_patcher.start()
from aschedule.ext import every_day
schedule1 = every_day(self.get_coro)
self.every_mock.assert_called_with(self.get_coro, timedelta=timedelta(days=1), loop=None)
schedule2 = every_day(self.get_coro, loop=self.loop)
self.every_mock.assert_called_with(self.get_coro, timedelta=timedelta(days=1), loop=self.loop)
self.loop.run_until_complete(asyncio.sleep(1))
schedule1._cancel(running_jobs=True)
schedule2._cancel(running_jobs=True)
self.loop.run_until_complete(asyncio.sleep(1))
self.assertEqual(2, self.every_mo | ck.call_count)
def test_every_week(self):
self.every_patcher = patch('aschedule.ext.every')
self.addCleanup(self.every_patcher.stop)
self.every_mock = self.every_patcher.start()
from aschedule.ext import every_week
schedule1 = every_week(self.get_coro)
self.every_mock.assert_called_with(self.get_coro, timedelta=timedelta(days=7), loop=None)
schedule2 = every_week(self.get_coro, loop=self.loop)
se | lf.every_mock.assert_called_with(self.get_coro, timedelta=timedelta(days=7), loop=self.loop)
self.loop.run_until_complete(asyncio.sleep(1))
schedule1._cancel(running_jobs=True)
schedule2._cancel(running_jobs=True)
self.loop.run_until_complete(asyncio.sleep(1))
self.assertEqual(2, self.every_mock.call_count)
def test_every_random_interval(self):
from aschedule.ext import every_random_interval
# scheduled executions: randrange(0, 5), randrange(5, 10), randrange(10, 15)
times = []
start = self.loop.time()
cancel_in_seconds = 16
# set the seed to avoid a scheduled execution on 16th second.
__import__('random').seed(38042)
async def record_times():
times.append(round(self.loop.time() - start))
schedule = every_random_interval(record_times, timedelta(seconds=5))
async def schedule_canceller():
await asyncio.sleep(cancel_in_seconds)
aschedule.cancel(schedule)
try:
self.loop.run_until_complete(
asyncio.gather(schedule_canceller(), schedule.future))
except asyncio.CancelledError:
pass
self.assertEqual(3, len(times))
for i, x in enumerate(times):
self.assertTrue(i * 5 <= x < (i + 1) * 5)
def tearDown(self):
asyncio.set_event_loop(self.loop)
|
mpetyx/pyrif | 3rdPartyLibraries/FuXi-master/examples/example7.py | Python | mit | 149 | 0 | from FuXi.Hor | n.HornRules import HornFromN3
rules = HornFromN3(
'http://www.agfa.com/w3c/euler/rdfs-rules.n3')
for rule in r | ules:
print(rule)
|
locolan/pokepy | pokepy/migrations/0002_auto_20150503_0002.py | Python | mit | 1,743 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pokepy', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pokemon',
name='abilities',
field=models.ManyToManyField(to='pokepy.Abilities'),
),
migrations.AddField(
model_name='pokemon',
name='description',
field=models.TextField(default=b'default'),
),
migrations.AddField(
model_name='pokemon',
name='egg_group',
field=models.CharField(default=b'default', max_length=50),
),
migrations.AddField(
model_name='pokemon',
name='games',
field=models.TextField(default=b'default'),
),
| migrations.AddField(
model_name='pokemon',
name='moves',
field=models.ManyToManyField(to='pokepy.Moves'),
),
migrations.AddField(
model_name='pokemon',
name='name',
field=models.CharField(default=b'digimon', max_length=50),
),
migrations.AddField(
| model_name='pokemon',
name='type1',
field=models.CharField(default=b'normal', max_length=50),
),
migrations.AddField(
model_name='pokemon',
name='type2',
field=models.CharField(default=b'normal', max_length=50),
),
migrations.AlterField(
model_name='pokemon',
name='id',
field=models.IntegerField(serialize=False, primary_key=True),
),
]
|
nmandavia/kafka-python | test/test_producer.py | Python | apache-2.0 | 8,543 | 0.002109 | # -*- coding: utf-8 -*-
import collections
import logging
import time
from mock import MagicMock, patch
from . import unittest
from kafka import KafkaClient, SimpleProducer
from kafka.common import (
AsyncProducerQueueFull, FailedPayloadsError, NotLeaderForPartitionError,
ProduceResponse, RetryOptions, TopicAndPartition
)
from kafka.producer.base import Producer, _send_upstream
from kafka.protocol import CODEC_NONE
import threading
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
try:
xrange
except NameError:
xrange = range
class TestKafkaProducer(unittest.TestCase):
def test_producer_message_types(self):
producer = Producer(MagicMock())
topic = b"test-topic"
partition = 0
bad_data_types = (u'你怎么样?', 12, ['a', 'list'], ('a', 'tuple'), {'a': 'dict'})
for m in bad_data_types:
with self.assertRaises(TypeError):
logging.debug("attempting to send message of type %s", type(m))
producer.send_messages(topic, partition, m)
good_data_types = (b'a string!',)
for m in good_data_types:
# This should not raise an exception
producer.send_messages(topic, partition, m)
def test_topic_message_types(self):
client = MagicMock()
def partitions(topic):
return [0, 1]
client.get_partition_ids_for_topic = partitions
producer = SimpleProducer(client, random_start=False)
topic = b"test-topic"
producer.send_messages(topic, b'hi')
assert client.send_produce_request.called
@patch('kafka.producer.base._send_upstream')
def test_producer_async_queue_overfilled(self, mock):
queue_size = 2
producer = Producer(MagicMock(), async=True,
async_queue_maxsize=queue_size)
topic = b'test-topic'
partition = 0
message = b'test-message'
with self.assertRaises(AsyncProducerQueueFull):
message_list = [message] * (queue_size + 1)
producer.send_messages(topic, partition, *message_list)
self.assertEqual(producer.queue.qsize(), queue_size)
for _ in xrange(producer.queue.qsize()):
producer.queue.get()
def test_producer_sync_fail_on_error(self):
error = FailedPayloadsError('failure')
with patch.object(KafkaClient, 'load_metadata_for_topics'):
with patch.object(KafkaClient, 'get_partition_ids_for_topic', return_value=[0, 1]):
with patch.object(KafkaClient, '_send_broker_aware_request', return_value = [error]):
client = KafkaClient(MagicMock())
producer = SimpleProducer(client, async=False, sync_fail_on_error=False)
# This should not raise
(response,) = producer.send_messages('foobar', b'test message')
self.assertEqual(response, error)
producer = SimpleProducer(client, async=False, sync_fail_on_error=True)
with self.assertRaises(FailedPayloadsError):
producer.send_messages('foobar', b'test message')
class TestKafkaProducerSendUpstream(unittest.TestCase):
def setUp(self):
self.client = MagicMock()
self.queue = Queue()
def _run_process(self, retries_limit=3, sleep_timeout=1):
# run _send_upstream process with the queue
stop_event = threading.Event()
retry_options = RetryOptions(limit=retries_limit,
backoff_ms=50,
retry_on_timeouts=False)
self.thread = threading.Thread(
target=_send_upstream,
args=(self.queue, self.client, CODEC_NONE,
0.3, # batch time (seconds)
3, # batch length
Producer.ACK_AFTER_LOCAL_WRITE,
Producer.DEFAULT_ACK_TIMEOUT,
retry_options,
stop_event))
self.thread.daemon = True
self.thread.start()
time.sleep(sleep_timeout)
stop_event.set()
def test_wo_retries(self):
# lets create a queue and add 10 messages for 1 partition
for i in range(10):
self.queue.put((TopicAndPartition("test", 0), "msg %i", "key %i"))
self._run_process()
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 4 non-void cals:
# 3 batches of 3 msgs each + 1 batch of 1 message
self.assertEqual(self.client.send_produce_request.call_count, 4)
def test_first_send_failed(self):
# lets create a queue and add 10 messages for 10 different partitions
# to show how retries should work ideally
for i in range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
# Mock offsets counter for closure
offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
self.client.is_first_time = True
def send_side_effect(reqs, *args, **kwargs):
if self.client.is_first_time:
self.client.is_first_time = False
return [FailedPayloadsError(req) for req in reqs]
responses = []
for req in reqs:
offset = offsets[req.topic][req.partition]
offsets[req.topic][req.partition] += len(req.messages)
responses.append(
ProduceResponse(req.topic, req.partition, 0, offset)
)
return responses
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(2)
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 5 non-void calls: 1st failed batch of 3 msgs
# plus 3 batches of 3 msgs each + 1 batch of 1 message
self.assertEqual(self.client.send_produce_request.call_count, 5)
def test_with_limited_retries(self):
# lets create a queue and add 10 messages for 10 different partitions
# to show how retries should work ideally
for i in range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i" % i, "key %i" % i))
def send_side_effect(reqs, *args, **kwargs):
return [FailedPayloadsError(req) for req in reqs]
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(3, 3)
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 16 non-void calls:
# 3 initial batches of 3 msgs each + 1 initial batch of 1 msg +
# 3 retries of | the batches above = (1 + 3 retries) * 4 batches = 16
self.assertEqual(self.client.send_produce_request.call_count, 16)
def test_async_producer_not_leader(self):
for i in | range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
# Mock offsets counter for closure
offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
self.client.is_first_time = True
def send_side_effect(reqs, *args, **kwargs):
if self.client.is_first_time:
self.client.is_first_time = False
return [ProduceResponse(req.topic, req.partition,
NotLeaderForPartitionError.errno, -1)
for req in reqs]
responses = []
for req in reqs:
offset = offsets[req.topic][req.partition]
offsets[req.topic][req.partition] += len(req.messages)
responses.append(
ProduceResponse(req.topic, req.partition, 0, offset)
)
return responses
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(2)
# the queue should be void at the end of the test
self |
open-synergy/opnsynid-hr | hr_employee_pob_from_home_address/__openerp__.py | Python | agpl-3.0 | 563 | 0 | # -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manifest-required-author
{
| "name": "Employee Place of Birth From Home Address",
"version": "8.0 | .2.0.0",
"category": "Human Resource",
"website": "https://simetri-sinergi.id",
"author": "PT. Simetri Sinergi Indonesia,OpenSynergy Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr",
"partner_place_of_birth",
],
"data": [],
}
|
GoogleCloudPlatform/psq | psq/psqworker.py | Python | apache-2.0 | 2,499 | 0 | #!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from importlib import import_module
import logging
import os
import sys
import click
from colorlog import ColoredFormatter
logger = logging.getLogger(__name__)
def setup_logging(): # pragma: no cover
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(asctime)s %(green)s%(name)s"
"%(reset)s %(message)s",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'blue',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def import_queue(location):
module, attr = location.rsplit('.', 1)
module = import_module(module)
queue = getattr(module, attr)
if hasattr(queue, '__call__'):
queue = queue()
return queue
@click.command()
@click.option(
'--path', '-p | ',
help='Import path. By default, this is the current working directory.')
@click.option(
'--pid',
help='Write the process ID to the specified file.')
@click.argument(
'queue',
nargs=1,
required=True)
def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the f | ull importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), "w") as f:
f.write(str(os.getpid()))
if not path:
path = os.getcwd()
sys.path.insert(0, path)
queue = import_queue(queue)
import psq
worker = psq.Worker(queue=queue)
worker.listen()
if __name__ == '__main__':
main()
|
viswimmer1/PythonGenerator | data/python_files/33855741/web_reserver-bak.py | Python | gpl-2.0 | 1,001 | 0.026973 | import web
import json
import datetime
import time
import uuid
#from mimerender import mimerender
#import mimerender
from onsa_jeroen import *
render_xml = lambda result: "<result>%s</result>"%result
render_json = lambda **result: json.dumps(result,sort_keys=True,indent=4)
render_html = lambda result: "<html><body>%s</body></html>"%result
render_txt = lambda result: result
def syncmyCall(func):
global result
result=None
def sync_func(*args, **kwargs):
global result
d=defer.maybeDeferred(func, *args, **kwargs)
while 1:
reactor.doSelect(1)
print result
time. | sleep(1)
#return result
return sync_func
@syncmyCall
@defer.inlineCallbacks
def query (nsa):
global result
client,client_nsa = createClient()
nsa = getNSA(nsa)
qr = yield client.query(client_nsa, nsa, None, "Summary", connection_ids = [] )
#result = qr
result = "blaaa"
print query("uva4 | k")
#if __name__ == "__main__":
|
named-data/ndn-atmos | lib/ndn_cmmap_translators/atmos2ndn_parser/conf_file_parser.py | Python | gpl-3.0 | 4,328 | 0.008549 | #!/usr/bin/env python3
# -*- Mode:python; c-file-style:"gnu"; indent-tabs-mode:nil -*- */
#
# Copyright (c) 2015, Colorado State University.
#
# This file is part of ndn-atmos.
#
# ndn-atmos is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# ndn-atmos is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received copies of the GNU General Public License and GNU Lesser
# General Public License along with ndn-atmos, e.g., in COPYING.md file. If not, see
# <http://www.gnu.org/licenses/>.
#
# See AUTHORS.md for complete list of ndn-atmos authors and contributors.
'''This is the config file parser module.
Input = object with command line parameters.
Output = list of components for different config sections'''
import configparser
import sys, traceback
class ParseConf(object):
'''parses the name schema file and returns name mappings for translated output'''
def __init__(self, confName):
self.confName = confName
if __debug__:
print("Config file name: %s" %(self.confName))
self.filenameMap = []
self.ndnNameMap = []
self.seperatorsMap = []
self.userDefinedConfDir = {}
self.translator = []
#initialize the parser
self.parser = configparser.SafeConfigParser()
self.parser.optionxform=str
self.parser.read(self.confName)
self.fullConf = {}
#do the mapping
res = self.getMappings(confName)
if res is False:
print("Error getting values from config file")
raise error.with_traceback(sys.exc_info()[2])
def _parseConf(self):
#iterate over them and store the name components in fullConf
try:
for sectionName in self.parser.sections():
self.conf = {}
for name, value in self.parser.items(sectionName):
self.conf[name] = value
self.fullConf[sectionName] = self.conf
if __debug__:
print(self.fullConf)
except KeyError:
print("Key %s is not found in config file" %(name))
print(sys.exc_info()[2])
except TypeError:
print("TypeError while parsing config file")
print(sys.exc_info()[2])
return self.fullConf
def _doParsing(self):
#parser now contain a dictionary with the sections in conf
# first elements are section and second ones are variables defined in config file
try:
self.filenameMap = self.fullConf['Name']['filenameMapping'].replace(" ", "").split(',')
self.ndnNameMap = self.fullConf['Name']['ndnMapping'].replace(" ", "").split(',')
# user defined components look like this
#activity:cmip5, subactivity:atmos, organization:csu, ensemble:r3i1p1
userDefinedConf = self.fullConf['Name']['userDefinedComps'].replace(" ", "").split(',')
for item in userDefinedConf:
key, value = item.split(":")
self.userDefinedConfDir[key] = [value]
self.seperatorsMap = self.fullConf['Name']['se | perators'].replace(" ", "").split(',')
#reads which translator to use
self.translator = self.fullConf['Translator']['translator'].replace(" ", "")
except KeyError:
print("Key %s is not found in config file" %(name))
print(sys.exc_info()[2])
except TypeError:
print("TypeError while parsing config file")
| print(sys.exc_info()[2])
def getMappings(self, confName):
'''parses the schema file and provides name mappings'''
fullConf = self._parseConf()
#if dict is not empty
if fullConf:
res = self._doParsing()
if len(self.filenameMap) == 0 or len(self.ndnNameMap) == 0 or len(self.translator) == 0:
return False
else:
return True
else:
return False
|
petrutlucian94/nova_dev | nova/api/openstack/compute/contrib/user_data.py | Python | apache-2.0 | 949 | 0 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import | extensions
class User_data(extensions.ExtensionDescriptor):
"""Add user_data to the Create Server v1.1 API."""
name = "UserData"
alias = "os-user-data"
namespace = ("http://docs. | openstack.org/compute/ext/"
"userdata/api/v1.1")
updated = "2012-08-07T00:00:00+00:00"
|
2deviant/Mathematica-Trees | converters.py | Python | mit | 673 | 0.004458 | """
Code converter module for trees.py
"""
def _mathematica_line_segments(tree):
"""
Produce Mathematica graphics obje | ct elements.
"""
for branch in tree:
[depth, [[x0, y0], [x1, y1]]] = branch
yield '{{Thickness[{}/300.], Line[{{{{{},{}}},{{{},{}}}}}]}}'.format(
depth, x0, y0, x1, y1
)
def to_mathematica(tree):
    """
    Return Mathematica source code that draws *tree* with a 1:1 aspect
    ratio.  (Repaired: the assignment target had been corrupted to
    "co | de", which is a syntax error.)
    """
    segments = list(_mathematica_line_segments(tree))
    code = 'tree = {{\n{}\n}};\n\nShow[Graphics[tree], AspectRatio -> 1, PlotRange -> All]\n'.format(
        ',\n'.join(segments)
    )
    return code
|
googlevr/tilt-brush | Support/bin/hack_tilt.py | Python | apache-2.0 | 1,720 | 0.011047 | #!/usr/bin/env python
# Copyright 2020 The Tilt Brush Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
try:
from tiltbrush.tilt import Tilt
except ImportError:
print "You need the Tilt Brush Toolkit (https://github.com/goo | glevr/tilt-brush-toolkit)"
print "and then put its Python directory in your PYTHONPATH."
sys.exit(1)
def main():
    """Command-line entry point: vertically translate .tilt sketches.

    For each file, if --set-min-y was passed, shift every stroke so the
    sketch's lowest control point sits at that y value; the sketch file
    is rewritten in place either way.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--set-min-y', dest='desired_min_y', type=float,
                        default=None,
                        help='Move sketch up/down to match the passed y value')
    parser.add_argument('files', nargs='+')
    args = parser.parse_args()
    for filename in args.files:
        tilt = Tilt(filename)
        sketch = tilt.sketch
        print '=== %s ===' % filename
        if args.desired_min_y is not None:
            # Lowest y over every control point of every stroke.
            # NOTE(review): min() raises ValueError on a sketch with no
            # strokes - confirm whether empty sketches can occur.
            min_y = min(cp.position[1]
                        for stroke in sketch.strokes
                        for cp in stroke.controlpoints)
            delta = args.desired_min_y - min_y
            for stroke in sketch.strokes:
                for cp in stroke.controlpoints:
                    cp.position[1] += delta
            print filename
            print 'Moved by %.3f' % delta
        # The sketch is saved even when no translation was requested.
        tilt.write_sketch()
# Script entry point.
if __name__ == '__main__':
    main()
|
sullivanmatt/splunk-sdk-python | tests/test_storage_passwords.py | Python | apache-2.0 | 9,282 | 0.000431 | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testlib
import logging
import splunklib.client as client
class Tests(testlib.SDKTestCase):
    def setUp(self):
        """Connect to Splunk and cache the storage-passwords collection.

        NOTE(review): does not call the superclass setUp - confirm that
        testlib.SDKTestCase needs no per-test initialization here.
        """
        self.service = client.connect(**self.opts.kwargs)
        self.storage_passwords = self.service.storage_passwords
    def tearDown(self):
        """Sweep credentials whose realm/username marks them as disposable."""
        # Delete all passwords created by SDK tests
        for sp in self.storage_passwords:
            if "delete-me" in sp.username or "delete-me" in sp.realm:
                sp.delete()
def test_create(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_backslashes(self):
start_count = len(self.storage_passwords)
realm = "\\" + testlib.tmpname()
username = "\\" + testlib.tmpname()
# Prepends one escaped slash
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
# Prepends one escaped slash
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
# Checks for 2 escaped slashes (Splunk encodes the single slash)
self.assertEqual(p.name, "\\" + realm + ":\\" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_slashes(self):
start_count = len(self.storage_passwords)
realm = "/" + testlib.tmpname()
username = "/" + testlib.tmpname()
# Prepends one escaped slash
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
# Prepends one escaped slash
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
# Checks for 2 escaped slashes (Splunk encodes the single slash)
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_norealm(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, None)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_colons(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
p = self.storage_passwords.create("changeme", username + ":end",
":start" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start" + realm)
self.assertEqual(p.username, username + ":end")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start" + realm + ":" + username + "\\:end:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
prefix = testlib.tmpname()
realm = prefix + ":r:e:a:l:m:"
user = ":u:s:e:r:"
p = self.storage_passwords.create("changeme", user, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, user)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
prefix + "\\:r\\:e\\:a\\:l\\:m\\::\\:u\\:s\\:e\\:r\\::")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_crazy(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
p = self.storage_passwords.create("changeme",
username + ":end!@#$%^&*()_+{}:|<>?",
":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(p.username, username + ":end!@#$%^&*()_+{}:|<>?")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start\\:\\:!@#$%^&*()_+{}\\:|<>?" + realm + ":" + username + "\\:end!@#$%^&*()_+{}\\:|<>?:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_read(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
for sp in self.storage_passwords:
self.assert | True(p.name in self.storage_passwords)
# Name works with or without a trailing colon
self.assertTrue((":" + username + ":") in self.storage_passwords)
self.as | sertTrue((":" + username) in self.storage_passwords)
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_update(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.update(password="Splunkeroo!")
self.assertEqual(p.clear_password, "changeme")
p.refresh()
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "Splunkeroo!")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_delete(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, "myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, "myrealm")
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, "myrealm:" + username + ":")
self.storage_passwords.delete(username, "myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
self.storage_passwords.cre |
rrajath/PyFileSearch | src/FileSearch.py | Python | mit | 3,158 | 0.007916 | '''
Created on Sep 16, 2013
@author: rajath
'''
import os, fnmatch
def rec_dir_list(rootpath, pattern):
    """Recursively collect files under *rootpath* matching *pattern*.

    *pattern* is an fnmatch-style glob (e.g. "*.py"); the result is a
    list of full paths.

    Fixes two defects in the original: the accumulator was also named
    ``files`` and was rebound by ``os.walk`` on every iteration (so only
    the last directory's listing was returned), and a leftover debug
    ``print type(dirs)`` was removed.
    """
    matches = []
    for root, dirs, files in os.walk(rootpath):
        for filename in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, filename))
    return matches
#end
def list_all_files(rootpath):
    """Return the full path of every file anywhere under *rootpath*."""
    return [os.path.join(root, name)
            for root, _dirs, files in os.walk(rootpath)
            for name in files]
#end
def all_files_dict(rootpath):
    """Walk *rootpath* and group every file path by its extension.

    Returns a dict mapping extension (e.g. ".txt"; for names without a
    dot the key is the last character - a quirk of rfind returning -1,
    preserved for compatibility) to the list of full file paths.

    Fixes a data-loss bug: ``d[k] = tmp.append(x)`` stored None (since
    list.append returns None), and the bare except then silently
    restarted the list, discarding previously collected paths.
    """
    dict_files = {}
    for root, dirs, files in os.walk(rootpath):
        for filename in files:
            filetype = filename[filename.rfind('.'):]
            file_path = os.path.join(root, filename)
            dict_files.setdefault(filetype, []).append(file_path)
    return dict_files
#end
def dirs_dict(rootpath):
    """Walk *rootpath* and group file paths by their containing directory.

    Returns a dict mapping the resolved absolute directory path to the
    list of full paths of the files directly inside it.

    Fixes in this rewrite: the original mutated the process-wide current
    working directory via os.chdir (and never restored it), sliced paths
    on a hard-coded "/" (Unix-only), and used a bare except that could
    mask unrelated errors.  os.path.realpath(root) yields the same
    resolved directory that os.chdir + os.getcwd produced.
    """
    dict_dirs = {}
    for root, dirs, files in os.walk(rootpath):
        for filename in files:
            file_path = os.path.join(root, filename)
            dir_path = os.path.realpath(root)
            dict_dirs.setdefault(dir_path, []).append(file_path)
    return dict_dirs
#end
def list_all_dirs(rootpath):
    """Return the full path of every directory anywhere under *rootpath*."""
    return [os.path.join(root, name)
            for root, dirs, _files in os.walk(rootpath)
            for name in dirs]
#end
def search_file(rootpath, file):
    """Return the full path of the first file named *file* found under
    *rootpath* (top-down walk order), or None if absent."""
    for root, dirs, files in os.walk(rootpath):
        if file in files:
            return os.path.join(root, file)
    return None
#end
def search_dir(rootpath, dir):
    """Return the full path of the first directory named *dir* found
    under *rootpath* (top-down walk order), or None if absent."""
    for root, dirs, files in os.walk(rootpath):
        if dir in dirs:
            return os.path.join(root, dir)
    return None
#end
|
vitorfs/bootcamp | bootcamp/messager/models.py | Python | mit | 3,617 | 0.000553 | import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
class MessageQuerySet(models.query.QuerySet):
    """Personalized queryset created to improve model usability."""
    def get_conversation(self, sender, recipient):
        """Returns all the messages sent between two users, oldest first."""
        qs_one = self.filter(sender=sender, recipient=recipient)
        qs_two = self.filter(sender=recipient, recipient=sender)
        return qs_one.union(qs_two).order_by("timestamp")
    def get_most_recent_conversation(self, recipient):
        """Returns the counterpart (a user instance, not a username) of
        *recipient*'s most recent conversation; falls back to
        *recipient*'s own user object when no message exists."""
        try:
            qs_sent = self.filter(sender=recipient)
            # NOTE(review): "recieved" is a typo, but renaming a local
            # would be a code change; left as-is.
            qs_recieved = self.filter(recipient=recipient)
            qs = qs_sent.union(qs_recieved).latest("timestamp")
            if qs.sender == recipient:
                return qs.recipient
            return qs.sender
        except self.model.DoesNotExist:
            return get_user_model().objects.get(username=recipient.username)
    def mark_conversation_as_read(self, sender, recipient):
        """Mark as read any unread elements in the current conversation.

        Returns the number of rows updated."""
        qs = self.filter(sender=sender, recipient=recipient)
        return qs.update(unread=False)
class Message(models.Model):
    """A private message sent between two users.

    (Repaired: a stray " | " marker had corrupted the assignment inside
    mark_as_read and the send_message docstring.)
    """
    uuid_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    sender = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="sent_messages",
        verbose_name=_("Sender"),
        null=True,
        on_delete=models.SET_NULL,
    )
    recipient = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="received_messages",
        null=True,
        blank=True,
        verbose_name=_("Recipient"),
        on_delete=models.SET_NULL,
    )
    timestamp = models.DateTimeField(auto_now_add=True)
    message = models.TextField(max_length=1000, blank=True)
    unread = models.BooleanField(default=True, db_index=True)
    objects = MessageQuerySet.as_manager()
    class Meta:
        verbose_name = _("Message")
        verbose_name_plural = _("Messages")
        ordering = ("-timestamp",)
    def __str__(self):
        return self.message
    def mark_as_read(self):
        """Mark this message as read (no-op when already read)."""
        if self.unread:
            self.unread = False
            self.save()
    @staticmethod
    def send_message(sender, recipient, message):
        """Create a message and notify the recipient over Channels.

        :param sender: User instance of the user sending the message.
        :param recipient: User instance of the user to receive the message.
        :param message: Text shorter than 1000 characters containing the
            actual message.
        :return: The newly created Message instance.
        """
        new_message = Message.objects.create(
            sender=sender, recipient=recipient, message=message
        )
        channel_layer = get_channel_layer()
        payload = {
            "type": "receive",
            "key": "message",
            "message_id": str(new_message.uuid_id),
            "sender": str(sender),
            "recipient": str(recipient),
        }
        # Only notify once the DB row is durably committed.
        transaction.on_commit(
            lambda: async_to_sync(channel_layer.group_send)(recipient.username, payload)
        )
        return new_message
|
huiyiqun/check_mk | web/htdocs/availability.py | Python | gpl-2.0 | 81,172 | 0.010866 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING.  If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import utils
import bi, views, visuals
import sites
from valuespec import *
import cmk.defines as defines
import cmk.paths
import cmk.store as store
# .--Declarations--------------------------------------------------------.
# | ____ _ _ _ |
# | | | _ \ ___ ___| | __ _ _ __ __ _| |_(_) ___ _ __ ___ |
# | | | | |/ _ \/ __| |/ _` | '__/ _` | __| |/ _ \| '_ \/ __| |
# | | |_| | __/ (__| | (_| | | | (_| | |_| | (_) | | | \__ \ |
# | |____/ \___|\___|_|\__,_|_| \__,_|\__|_|\___/|_| |_|___/ |
# | |
# '----------------------------------------------------------------------'
# Availability legend tables.  Each row is a 4-tuple of
# (state key, style/CSS class, column title, optional help text);
# NOTE(review): field meaning inferred from the identifiers - confirm
# against the rendering code that consumes these tables.
host_availability_columns = [
    ("up", "state0", _("UP"), None),
    ("down", "state2", _("DOWN"), None),
    ("unreach", "state3", _("UNREACH"), None),
    ("flapping", "flapping", _("Flapping"), None),
    ("in_downtime", "downtime", _("Downtime"), _("The host was in a scheduled downtime")),
    ("outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period")),
    ("outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period")),
    ("unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available")),
]
service_availability_columns = [
    ("ok", "state0", _("OK"), None),
    ("warn", "state1", _("WARN"), None),
    ("crit", "state2", _("CRIT"), None),
    ("unknown", "state3", _("UNKNOWN"), None),
    ("flapping", "flapping", _("Flapping"), None),
    ("host_down", "hostdown", _("H.Down"), _("The host was down")),
    ("in_downtime", "downtime", _("Downtime"), _("The host or service was in a scheduled downtime")),
    ("outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period")),
    ("outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period")),
    ("unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available")),
]
bi_availability_columns = [
    ("ok", "state0", _("OK"), None),
    ("warn", "state1", _("WARN"), None),
    ("crit", "state2", _("CRIT"), None),
    ("unknown", "state3", _("UNKNOWN"), None),
    ("in_downtime", "downtime", _("Downtime"), _("The aggregate was in a scheduled downtime")),
    ("unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available")),
]
# Table selection by monitored object type.
availability_columns = {
    "host": host_availability_columns,
    "service": service_availability_columns,
    "bi": bi_availability_columns,
}
# Column headers for the duration statistics columns.
statistics_headers = {
    "min": _("Shortest"),
    "max": _("Longest"),
    "avg": _("Average"),
    "cnt": _("Count"),
}
#.
# .--Options-------------------------------------------------------------.
# | ___ _ _ |
# | / _ \ _ __ | |_(_) ___ _ __ ___ |
# | | | | | '_ \| __| |/ _ \| '_ \/ __| |
# | | |_| | |_) | |_| | (_) | | | \__ \ |
# | \___/| .__/ \__|_|\___/|_| |_|___/ |
# | |_| |
# +----------------------------------------------------------------------+
# | Handling of all options for tuning availability computation and |
# | display. |
# '----------------------------------------------------------------------'
# Options for availability computation and rendering. These are four-tuple
# with the columns:
# 1. variable name
# 2. show in single or double height box
# 3. use this in reporting
# 4. the valuespec
def get_avoption_entries(what):
if what == "bi":
grouping_choices = [
( None, _("Do not group") ),
( "host", _("By Aggregation Group") ),
]
else:
grouping_choices = [
( None, _("Do not group") ),
( "host", _("By Host") ),
( "host_groups", _("By Host group") ),
( "service_groups", _("By Service group") ),
]
def aligned_label(text):
return "<div style=\"width: 186px; display: inline-block;\">%s:</div>" % text
return [
# Time range selection
( "rangespec",
"double",
False,
Timerange(
title = _("Time Range"),
default_value = 'd0',
)),
# Labelling and Texts
( "labelling",
"double",
True,
ListChoice(
title = _("Labelling Options"),
choices = [
( "omit_headers", _("Do not display column headers")),
( "omit_host", _("Do not display the host name")),
( "show_alias", _("Display the host alias")),
( "use_display_name", _("Use alternative display name for services")),
( "omit_buttons", _("Do not display icons for history and timeline")),
( "display_timeline_legend", _("Display legend for timeline")),
( "omit_av_levels", _("Do not display legend for availability levels")),
]
)),
# How to deal with downtimes
( "downtimes",
"double",
True,
Dictionary(
title = _("Scheduled Downtimes"),
columns = 2,
elements = [
( "include",
DropdownChoice(
choices = [
( "honor", _("Honor scheduled downtimes") ),
( "ignore", _("Ignore scheduled downtimes") ),
( "exclude", _("Exclude scheduled downtimes" ) ),
],
|
yashpatel5400/synalyze | app/segment/settings.py | Python | mit | 420 | 0.009524 | """
__authors__ = Yash, Will, Peter
__description__ = Global variables for the Python files (largely for
doing organization) for segmentation
"""
# ------------------------------ Directory variables -----------------------------
# Input audio to segment and where the segmenter writes its results.
# (Repaired stray " | " markers that had corrupted both path literals.)
INPUT_DIR = "app/segment/audio"
OUTPUT_DIR = "app/segment/output"
# ------------------------------ Ruby diarizer ------------------------------------
# Ruby script that performs the actual diarization/segmentation.
DIARIZER = "app/segment/segment.rb"
|
fnp/redakcja | src/documents/templatetags/common_tags.py | Python | agpl-3.0 | 336 | 0.00597 | # This file is part of FNP-Redakcja, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django import template
register = template.Library()
@register.filter
def username(user):
    """Return "<first> <last>" for *user*, or the raw username when both
    name fields are empty.  lstrip() drops the leading space left behind
    when first_name is blank (a trailing space remains when last_name is
    blank - historical behavior, preserved)."""
    return ("%s %s" % (user.first_name, user.last_name)).lstrip() or user.username
|
andrewdotn/vmreflect | vmreflect/tests/test_end_to_end.py | Python | bsd-2-clause | 5,318 | 0.002445 | # coding: UTF-8
"""
Full end-to-end test: start a server, tunnel, and connect from the VM
"""
import argparse
import SocketServer
import contextlib
import os
import pkg_resources
import socket
import string
import tempfile
import threading
import time
import unittest
from path import path
from vmreflect import Tunnel
from vmreflect.tests import test_config
from vmreflect.utils import get_random_string
from vmreflect.vmapi import VM
class DataReversingTCPRequestHandler(SocketServer.BaseRequestHandler):
    """Per-connection handler: reply with the request bytes reversed."""
    def handle(self):
        # A single recv suffices for the short test payloads (<= 1 KiB).
        data = self.request.recv(1024)
        response = ''.join(reversed(data))
        self.request.sendall(response)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that services each connection in its own thread."""
    # Allows quick rebinding of the test port across successive runs.
    allow_reuse_address = True
def client(ip, port, message):
    """Open a TCP connection, send *message*, and return the reply
    (at most 1 KiB)."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((ip, port))
    try:
        conn.sendall(message)
        return conn.recv(1024)
    finally:
        # Close the socket whether or not the exchange succeeded.
        conn.close()
class Server(object):
"""
A simple TCP server that echoes whatever is sent to it, in reverse.
"""
def __init__(self, verbose=False, port=0):
# Port 0 means to select an arbitrary unused port
HOST, PORT = "localhost", port
self.server = ThreadedTCPServer((HOST, PORT),
DataReversingTCPRequestHandler)
self.ip, self.port = self.server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=self.server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
if verbose:
print "Server running at %s:%s" % (self.ip, self.port)
def close(self):
self.server.shutdown()
class TestEndToEnd(unittest.TestCase):
    """
    This is the real test of the package: start a server on the host
    on a random port, set up the tunnel, then connect to the server
    from the guest and verify that it works.
    """
    def test_reversing_server(self):
        """Sanity-check the host-side echo server with no VM involved."""
        random_string = get_random_string()
        with contextlib.closing(Server()) as server:
            result = client(server.ip, server.port, random_string)
            self.assertEquals(result, ''.join(reversed(random_string)))
    def test_end_to_end(self):
        """Tunnel the server's port into the guest and connect from inside.

        Copies a small Windows socket client into the VM, runs it against
        localhost:<port> in the guest, and checks that the reversed
        payload comes back.
        """
        random_string = get_random_string()
        with contextlib.closing(Server()) as server:
            # Create a tunnel and wait for it to start.
            tunnel = Tunnel(port=server.port,
                            vm_name=test_config.vm_name,
                            username=test_config.vm_username,
                            password=test_config.vm_password)
            started = threading.Event()
            done = threading.Event()
            # NOTE(review): the function name is immediately rebound to
            # the Thread object below; harmless, but worth renaming one.
            def tunnel_thread():
                tunnel.start(started_event=started, done_event=done)
            tunnel_thread = threading.Thread(target=tunnel_thread)
            tunnel_thread.start()
            started.wait()
            try:
                tunnel_vmapi = VM(vm_name=test_config.vm_name,
                                  username=test_config.vm_username,
                                  password=test_config.vm_password)
                # get temp dir
                tmpdir, _ = tunnel_vmapi._run_command('echo %TEMP%')
                tmpdir = tmpdir.strip()
                # copy socketclient.exe to temporary filename
                target_filename = (tmpdir + r'\socketclient-' +
                                   get_random_string() + '.exe')
                fd, local_temp = tempfile.mkstemp(prefix='vmreflect')
                os.close(fd)
                try:
                    socketclient_bin = pkg_resources.resource_stream(
                        'vmreflect', 'lib-win32/socketclient/socketclient.exe')
                    with open(local_temp, 'wb') as out:
                        out.write(socketclient_bin.read())
                    os.chmod(local_temp, 0700)
                    tunnel_vmapi.copy_file_to_guest(
                        local_temp, target_filename)
                    try:
                        output, _ = tunnel_vmapi._run_command(
                            '%s %s %d %s' % (target_filename,
                                             'localhost', server.port,
                                             repr(random_string)))
                    finally:
                        # Always remove the client binary from the guest.
                        tunnel_vmapi.delete_file(target_filename)
                    self.assertIn(''.join(reversed(random_string)),
                                  output)
                finally:
                    os.unlink(local_temp)
            finally:
                # Unblock the tunnel thread so it can shut down.
                done.set()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--keep-alive', action='store_tr | ue')
parser.add_argument('--port', type=int, default=0)
args = parser.parse_args()
server = Server(verbose=True, port=args.port)
print client(server.ip, server.port, "Hello World 1")
try:
if args.keep_alive:
| while 1:
time.sleep(60)
finally:
server.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
MarcoBuster/OrarioTreniBot | src/updates/global_messages.py | Python | mit | 3,165 | 0.001896 | # Copyright (c) 2016-2017 The OrarioTreniBot Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import time
import botogram
import progressbar
import redis
import config
r = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB, password=config.REDIS_PASSWORD)
bot = botogram.create(config.BOT_TOKEN)
def post(text, parse_mode="HTML", reply_markup=None, disable_web_page_preview=True, message=None):
    """Broadcast *text* to every user recorded in Redis.

    Users whose chat is no longer reachable are flagged inactive in
    Redis and skipped.  When *message* is given, it is edited in place
    with progress updates.  (Repaired corrupted "parse_mode" and
    "finally:" tokens.)
    """
    users = []
    for user in r.keys("user:*"):
        users.append(int(user[5:]))
    print("Sending global message...")
    print("> Text", text, sep=": ")
    print("> Reply Markup", reply_markup, sep=": ")
    print("> Parse mode", parse_mode, sep=": ")
    print("> Disable web page preview", disable_web_page_preview, sep=": ")
    bar = progressbar.ProgressBar()
    for user in bar(users):
        if message:
            message.edit(
                "<b>Sending global message...</b>"
                "\n<b>{value}/{max_value}</b> ({percentage}%)"
                .format(value=bar.value, max_value=bar.max_value, percentage=round(bar.percentage, 1))
            )
            time.sleep(0.1)
        user_hash = "user:" + str(user)
        try:
            bot.chat(user)
        except botogram.APIError:
            # Chat no longer reachable: remember that and skip this user.
            r.hset(user_hash, "active", False)
            continue
        try:
            if r.hget(user_hash, "active").decode("utf-8") == "False":
                continue
            bot.api.call("sendMessage", {
                "chat_id": user, "text": text, "parse_mode": parse_mode,
                "disable_web_page_preview": disable_web_page_preview,
                "reply_markup": json.dumps(reply_markup) if reply_markup else ""
            })
        except botogram.APIError:
            r.hset(user_hash, "active", False)
        finally:
            # Throttle to stay under Telegram's rate limits.
            time.sleep(0.5)
    if message:
        message.edit(
            "<b>Sending global message...</b>"
            "\n<b>{value}/{max_value}</b> ({percentage}%)"
            .format(value=bar.value, max_value=bar.max_value, percentage=round(bar.percentage, 1))
        )
        time.sleep(0.1)
Baloc/TouSIX-Manager | tousix_manager/Rules_Generation/Statistics/manager.py | Python | gpl-3.0 | 3,475 | 0.000576 | # Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
from tousix_manager.Rules_Generation.Statistics.icmpv6 import ICMPv6
from tousix_manager.Rules_Generation.Statistics.ipv4 import IPv4
from tousix_manager.Rules_Generation.Statistics.ipv6 import IPv6
from django.conf import settings
from tousix_manager.Rules_Generation.Statistics.arp import ARP
class Manager(object):
    """
    Manager class for creating dataflow (statistics) rules.
    """
    def create_rules_members(self, dpid, peers):
        """
        Create Statistics_Manager rules for one switch.

        (Repaired two corrupted spans: a "destination" dict key and the
        "rule =" assignment of the IPv6 branch were broken by stray
        " | " markers, making the method a syntax error.)

        :param dpid: Target DPID
        :type dpid: int
        :param peers: Peer object array
        :type peers: list(Peer)
        :return: Flow rules array (list of dicts with module/rule/
            source/destination keys)
        """
        rules = []
        enabled = settings.RULES_GENERATION_ENABLED
        ipv4 = IPv4()
        ipv6 = IPv6()
        icmpv6 = ICMPv6()
        arp = ARP()
        for peer_dst in peers:
            if peer_dst.Egress is True:
                if enabled["Stats"].get('ICMPv6') is True:
                    rule = {"module": "Statistics_ICMPv6",
                            "rule": icmpv6.create_stat(dpid, None, peer_dst),
                            "source": None,
                            "destination": peer_dst.idPeer}
                    rules.append(rule)
                if enabled["Stats"].get('ARP') is True:
                    rule = {"module": "Statistics_ARP",
                            "rule": arp.create_stat(dpid, None, peer_dst),
                            "source": None,
                            "destination": peer_dst.idPeer}
                    rules.append(rule)
            for peer_src in peers:
                if peer_src != peer_dst:
                    if enabled["Stats"].get('IPv6') is True:
                        rule = {"module": "Statistics_IPv6",
                                "rule": ipv6.create_stat(dpid, peer_src, peer_dst),
                                "source": peer_src.idPeer,
                                "destination": peer_dst.idPeer}
                        rules.append(rule)
                    if enabled["Stats"].get('IPv4') is True:
                        rule = {"module": "Statistics_IPv4",
                                "rule": ipv4.create_stat(dpid, peer_src, peer_dst),
                                "source": peer_src.idPeer,
                                "destination": peer_dst.idPeer}
                        rules.append(rule)
        # Catch-all rule appended last so specific rules match first.
        rule = {
            "module": "Miss-table",
            "rule": ipv4.create_miss_table(dpid),
            "source": None,
            "destination": None
        }
        rules.append(rule)
        return rules
|
CMUSV-VisTrails/WorkflowRecommendation | examples/vtk_examples/Rendering/TPlane.py | Python | bsd-3-clause | 1,344 | 0 | #!/usr/bin/env python
# This simple example shows how to do basic texture mapping.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Load in the texture map. A texture is any unsigned char image. If it
# is not of this type, you will have to map it through a lookup table
# or by using vtkImageShiftScale.
bmpReader = vtk.vtkBMPReader()
bmpReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
atext = vtk.vtkTexture()
atext.SetInputConnection(bmpReader.GetOutputPort())
atext.InterpolateOn()
# Create a plane source and actor. The vtkPlanesSource generates
# texture coordinates.
plane = vtk.vtkPlaneSource()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(plane.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
planeActor.SetTexture(atext)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to th | e renderer, set the background and size
ren.AddActor(planeActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren.ResetCameraClippingRange()
| iren.Initialize()
renWin.Render()
iren.Start()
|
apark263/tensorflow | tensorflow/contrib/tensorrt/test/memory_alignment_test.py | Python | apache-2.0 | 2,982 | 0.003018 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contri | b.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework | import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class MemoryAlignmentTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
"""Testing conversion of BatchMatMul in TF-TRT conversion."""
dtype = dtypes.float32
input_name = "input"
input_dims = [2, 15, 15, 3]
output_name = "output"
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(
dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
with g.device("/GPU:0"):
e1 = constant_op.constant(
np.random.randn(1, 1, 3, 5), name="kernel_1", dtype=dtype)
e2 = constant_op.constant(
np.random.randn(1, 1, 5, 10), name="kernel_2", dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=e1,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
out = nn.conv2d(
input=conv,
filter=e2,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv_2")
array_ops.squeeze(out, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[[input_dims]],
output_names=[output_name],
expected_output_dims=[[[2, 15, 15, 10]]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-06 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 0.1
if __name__ == "__main__":
test.main()
|
chemelnucfin/tensorflow | tensorflow/python/keras/layers/local_test.py | Python | apache-2.0 | 17,770 | 0.009285 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
_DATA_FORMAT_PADDING_IMPLEMENTATION = [{
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 1
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 1
}, {
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 2
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 2
}, {
'data_format': 'channels_first',
'padding': 'valid',
'implementation': 3
}, {
'data_format': 'channels_first',
'padding': 'same',
'implementation': 3
}, {
'data_format': 'channels_last',
'padding': 'valid',
'implementation': 3
}, {
'data_format': 'channels_last',
'padding': 'same',
'implementation': 3
}]
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
for strides in [1]:
if padding == 'same' and strides != 1:
continue
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'padding': padding,
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected1D,
kwargs=kwargs,
input_shape=(num_samples, num_steps, input_dim))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d_regularization(self, data_format, padding,
implementation):
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_steps, input_dim))))
self.assertEqual(len(layer.losses), 3)
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
}
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
for strides in [(1, 1), (2, 2)]:
if padding == 'same' and strides != (1, 1):
continue
kwargs = {
'filters': filters,
'kernel_size': 3,
'padding': padding,
'kernel_regularizer': 'l2',
'bias_regulari | zer': 'l2',
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(nu | m_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_channels_first(self, data_format, padding,
implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
kwargs = {
'filters': filters,
'kernel_size': 3,
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_regularization(self, data_format, padding,
implementation):
num_samples = 2
filters = 3
stack_size = 4
num_row = 6
num_col = 7
kwargs = {
'filters': filters,
'kernel_size': 3,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'implementation': implementation,
'padding': padding,
'data_format': data_format
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected2D(**kwargs)
layer.build((num_samples, num_row, num_col, stack_size))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_row, num_col, stack_size))))
self.assertEqual(len(layer.losses), 3)
k_constrain |
dadadel/pyment | tests/docs_already_javadoc.py | Python | gpl-3.0 | 329 | 0.00304 | def func | 1(param1):
"""Function 1
with 1 param
@param param1: 1st parameter
@type param1: type
@return: None
"""
return None
def func2(param1, param2):
"""Function 2
with 2 params
@p | aram param1: 1st parameter
@type param1: type
@param param2: 2nd parameter
"""
pass
|
jcfr/mystic | examples2/g01_alt.py | Python | bsd-3-clause | 2,107 | 0.011391 | #!/usr/bin/env python
#
# Problem definition:
# A-R Hedar and M Fukushima, "Derivative-Free Filter Simulated Annealing
# Method for Constrained Continuous Global Optimization", Journal of
# Global Optimization, 35(4), 521-549 (2006).
#
# Original Matlab code written by A. Hedar (Nov. 23, 2005)
# http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/go.htm
# and ported to Python by Mike McKerns (December 2014)
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
from g01 import objective, bounds, xs, ys
from mystic.constraints import as_constraint
from mystic.penalty import quadratic_inequality
def penalty1(x): # <= 0.0
return 2*x[0] + 2*x[1] + x[9] + x[10] - 10.0
def penalty2(x): # <= 0.0
return 2*x[0] + 2*x[2] + x[9] + x[11] - 10.0
def penalty3(x): # <= 0.0
return 2*x[1] + 2*x[2] + x[10] + x[11] - 10.0
def penalty4(x): # <= 0.0
return -8*x[0] + x[9]
def penalty5(x): # <= 0.0
return -8*x[1] + x[10]
def penalty6(x): # <= 0.0
return -8*x[2] + x[11]
def penalty7(x): # <= 0.0
retu | rn -2*x[3] - x[4] + x[9]
def penalty8(x): # <= 0.0
return -2*x[5] - x[6] + x[10]
def penalty9(x): # <= 0.0
return -2*x[7] - x[8] + x[11]
@quadratic_inequality(penalty1)
@quadratic_inequality(penalty2)
@quadratic_inequality(penalty3)
@quadratic_inequality(penalty4)
@quadratic_inequality(penalty5)
@quadratic_inequality(penalty6)
@quadratic_inequality(penalty7)
@ | quadratic_inequality(penalty8)
@quadratic_inequality(penalty9)
def penalty(x):
return 0.0
solver = as_constraint(penalty)
if __name__ == '__main__':
x = [0]*len(xs)
from mystic.solvers import fmin_powell
from mystic.math import almostEqual
result = fmin_powell(objective, x0=x, bounds=bounds, penalty=penalty, disp=False, full_output=True)
assert almostEqual(result[0], xs, tol=1e-2)
assert almostEqual(result[1], ys, tol=1e-2)
# EOF
|
kickstandproject/ripcord | ripcord/tests/api/v1/subscribers/test_get.py | Python | apache-2.0 | 5,736 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ripcord.openstack.common import uuidutils
from ripcord.tests.api.v1 import base
class TestCase(base.FunctionalTest):
def setUp(self):
super(TestCase, self).setUp()
self.domain_name = 'example.org'
self.project_id = '793491dd5fa8477eb2d6a820193a183b'
self.user_id = '02d99a62af974b26b510c3564ba84644'
params = {
'name': self.domain_name,
}
self.headers = {
'X-Tenant-Id': self.project_id,
'X-User-Id': self.user_id,
}
res = self.post_json(
'/domains', params=params, status=200, headers=self.headers)
self.domain_id = res.json['uuid']
self.assertTrue(uuidutils.is_uuid_like(self.domain_id))
json = {
'description': 'a subscriber',
'disabled': False,
'domain_id': self.domain_id,
'email_address': 'bob@example.org',
'password': 'foobar',
'project_id': self.project_id,
'rpid': 'bob@example.org',
'user_id': self.user_id,
'username': 'bob',
}
params = {
'description': json['description'],
'disabled': json['disabled'],
'domain_id': json['domain_id'],
'email_address': json['email_address'],
'password': json['password'],
'rpid': json['rpid'],
'username': json['username'],
}
tmp = self.post_json(
'/subscribers', params=params, status=200, headers=self.headers)
self.assertTrue(tmp)
def test_get_one_failure(self):
res = self.get_json(
'/subscribers/%s' % '0eda016a-b078-4bef-94ba-1ab10fe15a7d',
expect_errors=True)
self.assertEqual(res.status_int, 404)
self.assertTrue(res.json['error_message'])
def test_get_one_success(self):
json = {
'description': 'a subscriber',
'disabled': False,
'domain_id': self.domain_id,
'email_address': 'alice@example.org',
'ha1': '84ed3e3a76703c1044da21c8609334a2',
'ha1b': '2dc0ac0e03670d8474db6b1e62df8fd1',
'password': 'foobar',
'project_id': self.project_id,
'rpid': 'alice@example.org',
'updated_at': None,
'user_id': self.user_id,
'username': 'alice',
}
params = {
'description': json['description'],
'disabled': json['disabled'],
'domain_id': json['domain_id'],
'email_address': json['email_address'],
'password': json['password'],
'rpid': json['rpid'],
'username': json['username'],
}
tmp = self.post_json(
'/subscribers', params=params, status=200, headers=self.headers)
self.assertTrue(tmp)
res = self.get_json('/subscribers/%s' % tmp.json['uuid'])
for k, v in json.iteritems():
self.assertEqual(res[k], v)
self.assertTrue(res['created_at'])
self.assertTrue(uuidutils.is_uuid_like(res['uuid']))
# NOTE(pabelanger): We add 2 because of created_at and uuid.
self.assertEqual(len(res), len(json) + 2)
def test_get_all_success(self):
domain_name = 'example.net'
project_id = '5fccabbb-9d65-417f-8b0b-a2fc77b501e6'
user_id = '09f07543-6dad-441b-acbf | -1c61b5f4015e'
params = {
'name': domain_name,
}
headers = {
'X-Tenant-Id': project_id,
'X-User-Id': user_id,
}
res = self.post_json(
'/domains', params=params, status=200, headers=headers)
json = {
'description': 'a subscriber',
'disabled': False,
'domain_id': res.json['uuid'],
'email_address': 'alice@example.net',
'ha1': '1f66286e1db577f | 81e06c22c017c137b',
'ha1b': '88bb93a6b9273446665753b5972265a8',
'password': 'foobar',
'project_id': project_id,
'rpid': 'alice@example.net',
'updated_at': None,
'user_id': user_id,
'username': 'alice',
}
params = {
'description': json['description'],
'disabled': json['disabled'],
'domain_id': json['domain_id'],
'email_address': json['email_address'],
'password': json['password'],
'rpid': json['rpid'],
'username': json['username'],
}
tmp = self.post_json(
'/subscribers', params=params, status=200, headers=headers)
self.assertTrue(tmp)
res = self.get_json('/subscribers', headers=headers)
self.assertEqual(len(res), 1)
for k, v in json.iteritems():
self.assertEqual(res[0][k], v)
self.assertTrue(res[0]['created_at'])
self.assertTrue(uuidutils.is_uuid_like(res[0]['uuid']))
# NOTE(pabelanger): We add 2 because of created_at and uuid.
self.assertEqual(len(res[0]), len(json) + 2)
|
Shashwat986/thesis | vectoralign/get_nn.py | Python | mit | 1,185 | 0.032911 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy
from sklearn.neighbors import KNeighborsClassifier
from scipy.cluster import hierarchy as hier
from scipy.spatial import distance
import json
import codecs
import sys
if len(sys.argv) < 2:
print "Provide file name"
sys.exit(1)
elif len(sys.argv) < 3:
out_file = "nn9m.dat"
else:
out_file = sys.argv[2]
print "Start"
fi = codecs.o | pen(sys.argv[1],"r","utf-8")
words = []
data = []
for line in fi:
if not len(line.strip()): continue
k = line.strip().split()
words | .append(k[0])
data.append([float(i) for i in k[-200:]])
fi.close()
vectors = np.array(data)
print "Pre-processing done"
# Calculate the distance matrix
def dist(x,y):
return np.dot(x,y)
knn = KNeighborsClassifier()
knn.fit(vectors,[0]*len(vectors))
fo = codecs.open(out_file,"w","utf-8")
for i,word in enumerate(words):
d,n = knn.kneighbors(vectors[i], n_neighbors = 25, return_distance = True)
if i%1000==0: print d,n
fo.write(word+"\t")
for j in range(1,len(n[0])):
fo.write(words[n[0][j]]+" ({:.6f}), ".format(d[0][j]))
fo.write("\n")
fo.close()
|
shimpe/frescobaldi | frescobaldi_app/cursortools.py | Python | gpl-2.0 | 7,435 | 0.004304 | # cursortools.py -- QTextCursor utility functions
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Functions manipulating QTextCursors and their selections.
"""
from __future__ import unicode_literals
import contextlib
import operator
from PyQt4.QtGui import QTextBlock, QTextBlockUserData, QTextCursor
def block(cursor):
"""Returns the cursor's block.
If the cursor has a selection, returns the block the selection starts in
(regardless of the cursor's position()).
"""
if cursor.hasSelection():
return cursor.document().findBlock(cursor.selectionStart())
return cursor.block()
def blocks(cursor):
"""Yields the block(s) containing the cursor or selection."""
d = cursor.document()
block = d.findBlock(cursor.selectionStart())
end = d.findBlock(cursor.selectionEnd())
while True:
yield block
if block == end:
break
block = block.next()
def contains(c1, c2):
"""Returns True if cursor2's selection falls inside cursor1's."""
return (c1.selectionStart() <= c2.selectionStart()
and c1.selectionEnd() >= c2.selectionEnd())
def forwards(block, until=QTextBlock()):
"""Yields the block and all following blocks.
If until is a valid block, yields the blocks until the specified block.
"""
if until.isValid():
while block.isValid() and block <= until:
yield block
block = block.next()
else:
while block.isValid():
yield block
block = block.next()
def backwards(block, until=QTextBlock()):
"""Yields the block and all preceding blocks.
If until is a valid block, yields the blocks until the specified block.
"""
if until.isValid():
while block.isValid() and block >= until:
yield block
block = block.previous()
else:
while block.isValid():
yield block
block = block.previous()
def all_blocks(document):
"""Yields all blocks of the document."""
return forwards(document.firstBlock())
def partition(cursor):
"""Returns a three-tuple of strings (before, selection, after).
'before' is the text before the cursor's position or selection start,
'after' is the text after the cursor's position or selection end,
'selection' is the selected text.
before and after never contain a newline.
"""
start = cursor.document().findBlock(cursor.selectionStart())
end = cursor.document().findBlock(cursor.selectionEnd())
before = start.text()[:cursor.selectionStart() - start.position()]
selection = cursor.selection().toPlainText()
after = end.text()[cursor.selectionEnd() - end.position():]
return before, selection, after
@contextlib.contextmanager
def compress_undo(cursor, join_previous = False):
"""Returns a context manager to perform operations on cursor as a single undo-item."""
cursor.joinPreviousEditBlock() if join_previous else cursor.beginEditBlock()
try:
yield
finally:
cursor.endEditBlock()
@contextlib.contextmanager
def keep_selection(cursor, edit=None):
"""Performs operations inside the selection and restore the selection afterwards.
If edit is given, call setTextCursor(cursor) on the Q(Plain)TextEdit afterwards.
"""
start, end, pos = cursor.selectionStart(), cursor.selectionEnd(), cursor.position()
cur2 = QTextCursor(cursor)
cur2.setPosition(end)
try:
yield
finally:
if pos == start:
cursor.setPosition(cur2.position())
cursor.setPosition(start, QTextCursor.KeepAnchor)
else:
cursor.setPosition(start)
cursor.setPosition(cur2.position(), QTextCursor.KeepAnchor)
if edit:
edit.setTextCursor(cursor)
def strip_selection(cursor, chars=None):
"""Adjusts the selection of the cursor just like Python's strip().
If there is no selection or the selection would vanish completely,
nothing is done.
"""
if not cursor.hasSelection():
| return
text = cursor.selection().toPlainText()
if not text.strip(chars):
return
l = len(text) - len(text.lstrip(chars))
r = len(text) - | len(text.rstrip(chars))
s = cursor.selectionStart() + l
e = cursor.selectionEnd() - r
if cursor.position() < cursor.anchor():
s, e = e, s
cursor.setPosition(s)
cursor.setPosition(e, QTextCursor.KeepAnchor)
def strip_indent(cursor):
"""Moves the cursor in its block to the first non-space character."""
text = cursor.block().text()
pos = len(text) - len(text.lstrip())
cursor.setPosition(cursor.block().position() + pos)
def insert_select(cursor, text):
"""Inserts text and then selects all inserted text in the cursor."""
pos = cursor.selectionStart()
cursor.insertText(text)
new = cursor.position()
cursor.setPosition(pos)
cursor.setPosition(new, QTextCursor.KeepAnchor)
def isblank(block):
"""Returns True if the block is an empty or blank line."""
text = block.text()
return not text or text.isspace()
def isblank_before(cursor):
"""Returns True if there's no text on the current line before the cursor."""
if cursor.atBlockStart():
return True
text = cursor.block().text()[:cursor.position() - cursor.block().position()]
return not text or text.isspace()
def isblank_after(cursor):
"""Returns True if there's no text on the current line after the cursor."""
if cursor.atBlockEnd():
return True
text = cursor.block().text()[cursor.position() - cursor.block().position():]
return not text or text.isspace()
def next_blank(block):
"""Returns the next block that is the first block of one or more blank blocks."""
bb = forwards(block)
for b in bb:
if not isblank(b):
for b in bb:
if isblank(b):
return b
def previous_blank(block):
"""Returns the previous block that is the first block of one or more blank blocks."""
bb = backwards(block)
for b in bb:
if not isblank(b):
for b in bb:
if isblank(b):
for b in bb:
if not isblank(b):
b = b.next()
break
return b
def data(block):
"""Get the block's QTextBlockUserData, creating it if necessary."""
data = block.userData()
if not data:
data = QTextBlockUserData()
block.setUserData(data)
return data
|
chrxr/wagtail | wagtail/utils/pagination.py | Python | bsd-3-clause | 1,095 | 0 | from __future__ import absolute_import, unicode_literals
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qs
DEFAULT_PAGE_KEY = 'p'
def paginate(request, items, page_key=DEFAULT_PAGE_KEY, per_page=20):
page = request.GET.get(page_key, 1)
paginator = Paginator(items, per_page)
try:
page = paginator.page(page)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(p | aginator.num_pages)
return paginator, page
def replace_page_in_query(query, page_number, page_key=DEFAULT_PAGE_KEY):
"""
Replaces ``page_key` | ` from query string with ``page_number``.
>>> replace_page_in_query("p=1&key=value", 2)
'p=2&key=value'
>>> replace_page_in_query("p=1&key=value", None)
'key=value'
"""
getvars = parse_qs(query)
if page_number is None:
getvars.pop(page_key, None)
else:
getvars[page_key] = page_number
return urlencode(getvars, True)
|
ngageoint/geoq | geoq/core/forms.py | Python | mit | 8,191 | 0.007447 | # -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in | Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django import forms
from django.forms.widgets import (RadioSelect, CheckboxInput,
CheckboxSelectMultiple)
from django.contrib.auth.models import User, Group
from django.utils.html import escape, conditional_escape
from django.db.models import Max
from itertools import chain
from .models import AOI, Job, Project
from geoq.maps.models import Layer, MapLayer
from django.contrib.admin.widget | s import FilteredSelectMultiple
no_style = [RadioSelect, CheckboxInput, CheckboxSelectMultiple]
class StyledModelForm(forms.ModelForm):
"""
Adds the span5 (in reference to the Twitter Bootstrap element)
to form fields.
"""
cls = 'span5'
def __init__(self, *args, **kwargs):
super(StyledModelForm, self).__init__(*args, **kwargs)
for f in self.fields:
if type(self.fields[f].widget) not in no_style:
self.fields[f].widget.attrs['class'] = self.cls
class AOIForm(StyledModelForm):
class Meta:
fields = ('name', 'description', 'job', 'analyst',
'priority', 'status')
model = AOI
class ItemSelectWidget(forms.SelectMultiple):
def __init__(self, attrs=None, choices=(), option_title_field=''):
self.option_title_field = option_title_field
super(ItemSelectWidget, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label, option_title=''):
option_value = forms.util.force_text(option_value)
if option_value in selected_choices:
selected_html = ' selected="selected"'
if not self.allow_multiple_selected:
selected_choices.remove(option_value)
else:
selected_html = ''
return '<option title="%s" value="%s"%s>%s</option>' % ( \
escape(option_title), escape(option_value), selected_html, conditional_escape(forms.util.force_text(option_label)))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(forms.util.force_text(v) for v in selected_choices)
choices = [(c[0], c[1], '') for c in choices]
more_choices = [(c[0], c[1]) for c in self.choices]
try:
option_title_list = [val_list[0] for val_list in self.choices.queryset.values_list(self.option_title_field)]
if len(more_choices) > len(option_title_list):
option_title_list = [''] + option_title_list # pad for empty label field
more_choices = [(c[0], c[1], option_title_list[more_choices.index(c)]) for c in more_choices]
except:
more_choices = [(c[0], c[1], '') for c in more_choices] # couldn't get title values
output = []
for option_value, option_label, option_title in chain(more_choices, choices):
if isinstance(option_label, (list, tuple)):
output.append('<optgroup label="%s">' % escape(forms.util.force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option, **dict(option_title=option_title)))
output.append('</optgroup>')
else: # option_label is just a string
output.append(self.render_option(selected_choices, option_value, option_label, option_title))
return '\n'.join(output)
class JobForm(StyledModelForm):
# analysts = forms.ModelMultipleChoiceField(
# queryset = User.objects.all(),
# widget = ItemSelectWidget(option_title_field='email')
# )
# layers = forms.ModelMultipleChoiceField(
# queryset = Layer.objects.all(),
# widget = ItemSelectWidget()
# )
analysts = forms.ModelMultipleChoiceField(
queryset = User.objects.all()
)
layers = forms.ModelMultipleChoiceField(
queryset = Layer.objects.all()
)
class Meta:
fields = ('name', 'description', 'project', 'analysts',
'teams', 'reviewers', 'feature_types', 'required_courses', 'tags', 'layers', 'editor',
'workflow', 'editable_layer', 'vocabulary')
model = Job
def __init__(self, project, *args, **kwargs):
super(JobForm, self).__init__(*args, **kwargs)
def remove_anonymous(field):
""" Removes anonymous from choices in form. """
field_var = self.fields[field].queryset.exclude(id=-1)
self.fields[field].queryset = field_var
return None
remove_anonymous('reviewers')
remove_anonymous('analysts')
self.fields['project'].initial = project
if 'data' in kwargs:
# If we're creating Job, we don't have a map
if self.instance.map == None:
return;
self.fields['analysts'].initial = kwargs['data'].getlist('analysts',None)
# must be a better way, but figure out the layers to display
layers_selected = set(kwargs['data'].getlist('layers',None))
layers_current_int = MapLayer.objects.filter(map=self.instance.map.id).values_list('layer_id', flat=True)
layers_current = set([str(i) for i in layers_current_int])
if layers_selected != layers_current:
# resolve differences
# first take out ones we want to remove
for x in layers_current - layers_selected:
MapLayer.objects.filter(map=self.instance.map.id,layer_id=x).delete()
# now add in new ones
layers = MapLayer.objects.filter(map=self.instance.map.id)
if layers.count() > 0:
max_stack_order = list(layers.aggregate(Max('stack_order')).values())[0]
else:
max_stack_order = 0
for x in layers_selected - layers_current:
max_stack_order+=1
ml = MapLayer.objects.create(map=self.instance.map,layer_id=int(x),stack_order=max_stack_order)
ml.save()
else:
if hasattr(kwargs['instance'],'analysts'):
self.fields['analysts'].initial = kwargs['instance'].analysts.all().values_list('id', flat=True)
else:
self.fields['analysts'].initial = []
if hasattr(kwargs['instance'],'map') and kwargs['instance'].map and kwargs['instance'].map.layers:
self.fields['layers'].initial = [x.layer_id for x in kwargs['instance'].map.layers]
# Form extending the original JobForm so an existing job's data can be
# exported into a new job; only the map field is exposed for selection.
class ExportJobForm(JobForm):
    class Meta:
        fields = ('map',)
        model = Job
class ProjectForm(StyledModelForm):
    """Create/edit form exposing a Project's basic settings."""

    class Meta:
        fields = ('name', 'description', 'project_type', 'active', 'private')
        model = Project
class TeamForm(StyledModelForm):
users = forms.ModelMultipleChoiceField(
queryset=User.objects.all()
)
class Media:
css = {
'all':('/static/admin/css/widgets.css',),
}
js = ('/admin/jsi18n',)
class Meta:
fields = ('name', 'users',)
model = Group
def __init__(self, *args, **kwargs):
self.team_id = kwargs.pop('team_id')
super(TeamForm, self).__init__(*args, **kwargs)
self.fields['name'].required = True
other_teams = Group.objects.exclude(id=self.team_id).values_list('name', flat=True)
self.fields['users'].queryset = User.objects.exclude(groups__name__in=other_teams)
self.fields['users'].initial = User.objects.filter(groups__id=self.team_id)
def remove_anonymous(field):
""" Removes anonymous from choices in form. """
field_var = self.fields[field].queryset.exclude(id=-1)
self.fields[field].queryset = field_var
return None
remove_anonymou |
bobmyhill/burnman | burnman/classes/layer.py | Python | gpl-2.0 | 36,384 | 0.000082 | from __future__ import print_function
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
import numpy as np
from scipy.integrate import odeint
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline, interp1d
from scipy.optimize import fsolve
from burnman import constants
from burnman.tools import geotherm
import warnings
from .material import Material, material_property
class Layer(object):
"""
The base class for a planetary layer.
The user needs to set the following before properties can be computed:
- set_material(), which sets the material of the layer,
e.g. a mineral, solid_solution, or composite
- set_temperature_mode(), either predefine, or set to an adiabatic profile
- set_pressure_mode(), to set the self-consistent pressure
(with user-defined option the pressures can be overwritten).
To set the self-consistent pressure the pressure at the top and the
gravity at the bottom of the layer need to be set.
- make(), computes the self-consistent part of the layer and starts the
settings to compute properties within the layer
Note that the entire planet this layer sits in is not necessarily
self-consistent, as the pressure at the top of the layer is a
function of the density within the layer (through the gravity).
Entire planets can be computed self-consistently with the planet class.
Properties will be returned at the pre-defined radius array,
although the evaluate() function can take a newly defined depthlist
and values are interpolated between these (sufficient sampling of the layer
is needed for this to be accurate).
"""
    def __init__(self, name=None, radii=None, verbose=False):
        """
        :param str name: Name of the layer.
        :param radii: Radii [m] at which the layer is sampled; must be
            strictly increasing from the inner to the outer boundary.
        :param bool verbose: Print extra information if True.
        """
        self.name = name
        # Radii must be strictly increasing (inner to outer).
        assert np.all(np.diff(radii) > 0)
        self.radii = radii
        self.outer_radius = max(self.radii)
        self.inner_radius = min(self.radii)
        self.thickness = self.outer_radius - self.inner_radius
        self.n_slices = len(self.radii)
        self.verbose = verbose
        # Cache of computed material properties; cleared by reset().
        self._cached = {}
        # Pressure/temperature profiles are computed lazily by make().
        self._pressures = None
        self._temperatures = None
        self.sublayers = None
        self.material = None
        # Defaults; normally overridden via set_pressure_mode() and
        # set_temperature_mode() before make() is called.
        self.pressure_mode = 'self-consistent'
        self.temperature_mode = None
def __str__(self):
"""
Prints details of the layer
"""
writing = (f'The {self.name} is made of {self.material.name}'
f' with { | self.temperature_mode} temperatures and '
f'{self.pressure_mode} pressures\n')
return writing
    def reset(self):
        """
        Resets all cached material properties.
        It is typically not required for the user to call this function.
        """
        # Drop memoised properties and derived profiles; they are lazily
        # recomputed the next time the layer state is evaluated.
        self._cached = {}
        self._pressures = None
        self._temperatures = None
        self.sublayers = None
    def set_material(self, material):
        """
        Set the material of a Layer with a Material
        """
        assert(isinstance(material, Material))
        self.material = material
        # Cached properties belong to the previous material; discard them.
        self.reset()
    def set_temperature_mode(self, temperature_mode='adiabatic',
                             temperatures=None, temperature_top=None):
        """
        Sets temperatures within the layer as user-defined values or as
        a (potentially perturbed) adiabat.
        Parameters
        ----------
        temperature_mode : string
            This can be set to 'user-defined', 'adiabatic',
            or 'perturbed-adiabatic'. 'user-defined' fixes the temperature
            with the profile input by the user. 'adiabatic' self-consistently
            computes the adiabat when setting the state of the layer.
            'perturbed-adiabatic' adds the user input array to the adiabat.
            This allows the user to apply boundary layers (for example).
        temperatures : array of float
            The desired fixed temperatures in [K].
            Should have same length as defined radii in layer.
        temperature_top : float
            Temperature at the top of the layer. Used if the temperature mode
            is chosen to be 'adiabatic' or 'perturbed-adiabatic'.
            If 'perturbed-adiabatic' is chosen as the temperature mode,
            temperature_top corresponds to the true temperature at the
            top of the layer, and the reference isentrope at this radius
            is defined to lie at a temperature of
            temperature_top - temperatures[-1].
        """
        # Any previously computed state is invalid once the mode changes.
        self.reset()
        assert(temperature_mode == 'user-defined'
               or temperature_mode == 'adiabatic'
               or temperature_mode == 'perturbed-adiabatic')
        self.temperature_mode = temperature_mode
        # A user-supplied profile must cover every radius slice.
        if ((temperature_mode == 'user-defined'
             or temperature_mode == 'perturbed-adiabatic')):
            assert(len(temperatures) == len(self.radii))
            self.usertemperatures = temperatures
        else:
            self.usertemperatures = np.zeros_like(self.radii)
        # Adiabatic modes anchor the profile at the top of the layer.
        if ((temperature_mode == 'adiabatic'
             or temperature_mode == 'perturbed-adiabatic')):
            self.temperature_top = temperature_top
        else:
            self.temperature_top = None
    def set_pressure_mode(self, pressure_mode='self-consistent',
                          pressures=None, gravity_bottom=None,
                          pressure_top=None, n_max_iterations=50,
                          max_delta=1.e-5):
        """
        Sets the pressure mode of the layer,
        which can either be 'user-defined', or 'self-consistent'.
        Parameters
        ----------
        pressure_mode : string
            This can be set to 'user-defined' or 'self-consistent'.
            'user-defined' fixes the pressures with the profile input
            by the user in the 'pressures' argument.
            'self-consistent' forces Layer to calculate pressures
            self-consistently. If this is selected, the user will need
            to supply values for the gravity_bottom [m/s^2]
            and pressure_top [Pa] arguments.
        pressures : array of floats
            Pressures [Pa] to set layer to
            (if the 'user-defined' pressure_mode has been selected).
            The array should be the same length as
            the layers user-defined radii array.
        pressure_top : float
            Pressure [Pa] at the top of the layer.
        gravity_bottom : float
            gravity [m/s^2] at the bottom of the layer.
        n_max_iterations : integer
            Maximum number of iterations to reach
            self-consistent pressures (default = 50)
        max_delta : float
            Relative update to the highest pressure in the layer between
            iterations to stop iterations (default = 1.e-5)
        """
        # Changing the pressure set-up invalidates all cached properties.
        self.reset()
        assert(pressure_mode == 'user-defined'
               or pressure_mode == 'self-consistent')
        self.pressure_mode = pressure_mode
        # Gravity at the base is needed in both modes.
        assert(gravity_bottom is not None)
        self.gravity_bottom = gravity_bottom
        if pressure_mode == 'user-defined':
            assert(pressures is not None)
            assert(len(pressures) == len(self.radii))
            self.pressures = pressures
            warnings.warn("By setting the pressures in Layer they "
                          "are unlikely to be self-consistent")
        elif pressure_mode == 'self-consistent':
            self.pressure_top = pressure_top
            self.n_max_iterations = n_max_iterations
            self.max_delta = max_delta
        else:
            # NOTE(review): unreachable after the assert above unless Python
            # runs with -O (asserts stripped); kept as a defensive guard.
            raise NotImplementedError(f'pressure mode {pressure_mode} '
                                      'not recognised')
def make(self):
"""
This routine needs to be called before evaluating any properties.
If pressures and temperatures are not user-defined, they
are computed here. This method also initializes an array of copied
materials from which properties ca |
minidron/django-geoaddress | setup.py | Python | gpl-2.0 | 1,074 | 0 | # --coding: utf8--
from setuptools import setup, find_packages


def _read_long_description():
    """Return the package long description from README.md."""
    # Context manager closes the file handle deterministically instead of
    # relying on garbage collection of the anonymous file object.
    with open('README.md') as readme:
        return readme.read()


setup(
    name='django-geoaddress',
    version='0.1.14',
    description=('Address field with GEO coordinates'),
    long_description=_read_long_description(),
    # Fixed 'Pavel ALekin' typo so author matches the maintainer spelling.
    author='Pavel Alekin',
    maintainer='Pavel Alekin',
    maintainer_email='pavel.alekin@gmail.com',
    url='https://github.com/minidron/django-geoaddress',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License (GPL)",
        "Programming Language :: Python",
        'Framework :: Django',
        "Topic :: Database :: Front-Ends",
        "Topic :: Documentation",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: Site Management",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Operating System :: OS Independent",
    ]
)
|
naparuba/opsbro | opsbro/topic.py | Python | mit | 4,562 | 0.013245 | # -*- coding: utf-8 -*-
import random
import itertools
from .misc.lolcat import lolcat
TOPIC_SERVICE_DISCOVERY = 0x1 << 0
TOPIC_AUTOMATIC_DECTECTION = 0x1 << 1
TOPIC_MONITORING = 0x1 << 2
TOPIC_METROLOGY = 0x1 << 3
TOPIC_CONFIGURATION_AUTOMATION = 0x1 << 4
TOPIC_SYSTEM_COMPLIANCE = 0x1 << 5
TOPIC_GENERIC = 0x1 << 6 # hidden one, for other stuff
TOPICS = [TOPIC_SERVICE_DISCOVERY, TOPIC_AUTOMATIC_DECTECTION, TOPIC_MONITORING, TOPIC_MET | ROLOGY,
TOPIC_CONFIGURATION_AUTOMATION, TOPIC_SYSTEM_COMPLIANCE]
VERY_ALL_TOPICS = TOPICS[:]
VERY_ALL_TOPICS.append(TOPIC_GENERIC)
TOPICS_LABELS = {
TOPIC_SERVICE_DISCOVERY : u'service discovery',
TOPIC_AUTOMATIC_DECTECTION : u'automatic detection',
TOPIC_MONITORING : u'monitoring',
TOPIC_METROLOGY : u'metrology',
TO | PIC_CONFIGURATION_AUTOMATION: u'configuration automation',
TOPIC_SYSTEM_COMPLIANCE : u'system compliance',
TOPIC_GENERIC : u'generic',
}
TOPIC_ID_BY_STRING = {
u'service discovery' : TOPIC_SERVICE_DISCOVERY,
u'automatic detection' : TOPIC_AUTOMATIC_DECTECTION,
u'monitoring' : TOPIC_MONITORING,
u'metrology' : TOPIC_METROLOGY,
u'configuration automation': TOPIC_CONFIGURATION_AUTOMATION,
u'system compliance' : TOPIC_SYSTEM_COMPLIANCE,
u'generic' : TOPIC_GENERIC,
}
# Size of configuration automation for aligment display
MAX_TOPICS_LABEL_SIZE = 26
TOPICS_LABEL_BANNER = {
TOPIC_SERVICE_DISCOVERY : u'┏ service discovery',
TOPIC_AUTOMATIC_DECTECTION : u'┗ automatic detection',
TOPIC_MONITORING : u'┏ monitoring',
TOPIC_METROLOGY : u'┗ metrology',
TOPIC_CONFIGURATION_AUTOMATION: u'┏ configuration automation',
TOPIC_SYSTEM_COMPLIANCE : u'┗ system compliance',
}
TOPICS_SUB_TITLES = {
TOPIC_SERVICE_DISCOVERY : u'Is there any new servers spawn last few seconds?',
TOPIC_AUTOMATIC_DECTECTION : u'Is my server linux or windows, mongodb or redis?',
TOPIC_MONITORING : u'Is all OK on my server and applications?',
TOPIC_METROLOGY : u'Is my server and application performing well?',
TOPIC_CONFIGURATION_AUTOMATION: u'Install+configure apache+nginx if server in web group',
TOPIC_SYSTEM_COMPLIANCE : u'Are the security patches applied?',
}
# sort of blue
DEFAULT_COLOR_ID = 40
TOPICS_COLORS = {
# light Purple
TOPIC_SERVICE_DISCOVERY : 26,
# pale purple
TOPIC_AUTOMATIC_DECTECTION : 30,
# light green
TOPIC_MONITORING : 53,
# pastel green
TOPIC_METROLOGY : 57,
# couleur peau
TOPIC_CONFIGURATION_AUTOMATION: 12,
# jaune sombre
TOPIC_SYSTEM_COMPLIANCE : 8,
# Other?
TOPIC_GENERIC : DEFAULT_COLOR_ID,
}
_TOPICS_COLORS_VALUES = list(TOPICS_COLORS.values()) # note: in python3 values is a generator, not a true list
random.shuffle(_TOPICS_COLORS_VALUES)
TOPICS_COLORS_RANDOM_VALUES_LOOP = itertools.cycle(_TOPICS_COLORS_VALUES)
# Yes, there is a pokemon word play with a french pokemon. I love pokemon and my son too. Deal with it ( •_•) ( •_•)>⌐■-■ (⌐■_■)
class Topiker(object):
    """Tracks which functional topics are currently enabled.

    All topics start enabled; unknown topic ids are rejected with an
    exception on every accessor that takes a topic.
    """

    def __init__(self):
        # Every known (public) topic starts out enabled.
        self.topic_enabled = dict((t, True) for t in TOPICS)

    @staticmethod
    def _ensure_known(topic):
        # Shared validation: reject any id outside the public topic list.
        if topic not in TOPICS:
            raise Exception('The topic %s is not an allowed one' % topic)

    def get_topic_states(self):
        return self.topic_enabled

    def set_topic_state(self, topic, state):
        self._ensure_known(topic)
        self.topic_enabled[topic] = state

    def is_topic_enabled(self, topic):
        self._ensure_known(topic)
        return self.topic_enabled[topic]

    def get_color_id_by_topic_string(self, topic_s):
        # Unknown labels fall back to the default colour.
        topic_id = TOPIC_ID_BY_STRING.get(topic_s)
        if topic_id is None:
            return DEFAULT_COLOR_ID
        return TOPICS_COLORS[topic_id]

    def get_color_id_by_topic_id(self, topic_id):
        return TOPICS_COLORS[topic_id]

    def get_colorized_topic_from_string(self, topic_s):
        color_id = self.get_color_id_by_topic_string(topic_s)
        return lolcat.get_line(topic_s, color_id, spread=None)


topiker = Topiker()
|
Araneidae/cothread | cothread/input_hook.py | Python | gpl-2.0 | 5,250 | 0.002667 | # This file is part of the Diamond cothread library.
#
# Copyright (C) 2007 James Rowland, 2007-2012 Michael Abbott,
# Diamond Light Source Ltd.
#
# The Diamond cothread library is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# The Diamond cothread library is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contact:
# Dr. Michael Abbott,
# Diamond Light Source Ltd,
# Diamond House,
# Chilton,
# Didcot,
# Oxfordshire,
# OX11 0DE
# michael.abbott@diamond.ac.uk
'''Simple readline hook to allow the scheduler to run while we're waiting
for input from the interpreter command line. Also includes optional support
for | the Qt event loop.'''
import sys
import os
from . import cothread
from . import coselect
__all__ = [
'iqt', # Enable interactive Qt loop
]
# When Qt is running in its own stack it really needs quite a bit of room.
QT_STACK_SIZE = int(os.environ.get('COTHREAD_QT_STACK', 1024 * 1024))
def _readline_hook():
    '''Runs other cothreads until input is available.'''
    # Poll stdin (fd 0) through the cothread select layer: this blocks the
    # current cothread only, letting the scheduler keep running the others.
    coselect.poll_list([(0, coselect.POLLIN)])
def _install_readline_hook(enable_hook = True):
    '''Install or remove the readline hook.

    With the hook installed the scheduler runs in parallel with
    interactive python: while readline is waiting for input, the
    scheduler still operates.  Pass enable_hook=False to disable the
    input hook again -- for example, this can be helpful if a background
    activity is causing a nuisance.'''
    from ._coroutine import install_readline_hook
    install_readline_hook(_readline_hook if enable_hook else None)
# This is used by the _run_iqt timeout() function to avoid nested returns.
# Depth of currently active timeout() invocations; see below.
_global_timeout_depth = 0


def _timer_iqt(poll_interval):
    '''Returns a running Qt timer which yields control from the Qt event
    loop to the cothread scheduler every poll_interval seconds.'''
    def timeout():
        # To avoid nested returns from timeout (which effectively means we
        # would resume the main Qt thread from within a Qt message box -- not
        # a good idea!) we keep track of how many nested calls to timeout()
        # there are.  Then we refuse to return until we're at the top of the
        # stack.
        global _global_timeout_depth
        _global_timeout_depth += 1
        timeout_depth = _global_timeout_depth
        cothread.Yield(poll_interval)
        # Spin (sleeping between polls) until every deeper nested timeout
        # has unwound before returning to Qt.
        while _global_timeout_depth > timeout_depth:
            cothread.Sleep(poll_interval)
        _global_timeout_depth -= 1
    # Set up a timer so that Qt polls cothread.  All the timer needs to do
    # is to yield control to the coroutine system.
    from .qt import QtCore
    timer = QtCore.QTimer()
    timer.timeout.connect(timeout)
    # QTimer.start() takes milliseconds; poll_interval is in seconds.
    timer.start(poll_interval * 1e3)
    return timer
# There are a number of issues with this function... needs to be properly
# idempotent, need to ensure that run_exec doesn't create an app instance? Or
# some other mechanism for same.
#
# Currently Ian's widget import includes the following code:
#
# if not hasattr(cothread.input_hook, '_timer'):
# cothread.iqt(run_exec = False)
#
# This is used to ensure that if widgets are imported directly into designer
# then cothread works. Note that things are not too complicated in this
# particular case as the Qt application is required to exist already.
def iqt(poll_interval = 0.05, run_exec = True, argv = None):
    '''Installs Qt event handling hook.  The polling interval is in
    seconds.

    poll_interval -- how often (seconds) the Qt timer yields to cothread.
    run_exec      -- when true, spawn the Qt exec loop in its own cothread.
    argv          -- arguments used if a new QApplication must be created
                     (defaults to sys.argv).

    Returns the QApplication instance; repeated calls are silent no-ops
    returning the already installed instance.'''
    from .qt import QtCore, QtWidgets
    global _qapp, _timer
    # Importing PyQt4 has an unexpected side effect: it removes the input hook!
    # So we put it back again...
    _install_readline_hook(True)
    # Repeated calls to iqt() are (silent) no-ops.  Is it more friendly do this
    # than to assert fail?  Not sure to be honest.
    if _qapp is not None:
        return _qapp
    # Ensure that there is a QtApplication instance, creating one if necessary.
    _qapp = QtCore.QCoreApplication.instance()
    if _qapp is None:
        if argv is None:
            argv = sys.argv
        _qapp = QtWidgets.QApplication(argv)
    # Arrange to get a Quit event when the last window goes.  This allows the
    # application to simply rest on WaitForQuit().
    _qapp.aboutToQuit.connect(cothread.Quit)
    # Create timer.  Hang onto the timer to prevent it from vanishing.
    _timer = _timer_iqt(poll_interval)
    # Finally, unless we've been told not to, spawn our own exec loop.
    if run_exec:
        cothread.Spawn(_qapp.exec_, stack_size = QT_STACK_SIZE)
        cothread.Yield()
    return _qapp
# Singleton QApplication managed by iqt(); None until iqt() is called.
_qapp = None

# Automatically install the readline hook.  This is the safest thing to do.
_install_readline_hook(True)
|
roadhead/satchmo | satchmo/recentlist/middleware.py | Python | bsd-3-clause | 1,327 | 0.002261 | from django.core.urlresolvers import NoReverseMatch, reverse
from satchmo.configuration import config_value
import logging
import re
log = logging.getLogger('recentlist.middleware')
# Build a regex matching product detail URLs: reverse-resolve the named
# pattern with a placeholder slug, then swap the placeholder for a
# capture group.
try:
    producturl = reverse('satchmo_product',
                        kwargs={'product_slug': 'FAKE'})
    producturl = "^" + producturl.replace("FAKE", r'(?P<slug>[-\w]+)')
    log.debug('Product url is %s', producturl)
    urlre = re.compile(producturl)
except NoReverseMatch:
    # No product URL configured; the middleware below becomes a no-op.
    log.debug("Could not find product url.")
    urlre = None
class RecentProductMiddleware(object):
    """Record recently viewed product slugs in the user's session."""

    def process_response(self, request, response):
        # Nothing to do when the product URL pattern failed to resolve.
        if urlre is None:
            return response
        match = urlre.search(request.path)
        if match and match.groups() and "/admin/" not in request.path:
            recentmax = config_value('SHOP', 'RECENT_MAX') + 1
            slug = match.groups()[0]
            recent = request.session.get('RECENTLIST', [])
            if slug not in recent:
                # Newest first; trim the list to the configured maximum.
                recent.insert(0, slug)
                if len(recent) > recentmax:
                    recent = recent[:recentmax]
                log.debug('Added recently viewed: %s', recent)
                request.session['RECENTLIST'] = recent
        return response
|
Melisius/SlowQuant | slowquant/geometryoptimization/GeometryOptimization.py | Python | bsd-3-clause | 1,721 | 0.010459 | import numpy as np
import time
import slowquant.derivatives.runForce as F
from slowquant.numerical.numForce import nForce
def GeoOpt(input, set, results):
    """Optimize the molecular geometry by gradient descent on the forces.

    input   -- geometry array; loops start at row 1, so row 0 appears to
               hold header data.  Columns 1-3 are treated as the x, y, z
               coordinates of each atom.
    set     -- settings dict; uses 'Max iteration GeoOpt',
               'Geometry Tolerance', 'Gradient Descent Step' and
               'Force Numeric'.
    results -- results dict threaded through the force evaluations.

    Returns the updated (input, results) pair.  Forces and geometries are
    appended to out.txt after every iteration.
    """
    maxstep = int(set['Max iteration GeoOpt']) + 1
    GeoOptol = float(set['Geometry Tolerance'])
    stepsize = float(set['Gradient Descent Step'])
    for i in range(1, maxstep):
        # Evaluate forces, numerically or analytically as configured.
        if set['Force Numeric'] == 'Yes':
            dX, dY, dZ = nForce(input, set, results)
        else:
            dX, dY, dZ, results = F.runForce(input, set, results)
        # Steepest-descent step on every atomic coordinate.
        for j in range(1, len(dX)):
            input[j, 1] = input[j, 1] - stepsize * dX[j]
            input[j, 2] = input[j, 2] - stepsize * dY[j]
            input[j, 3] = input[j, 3] - stepsize * dZ[j]
        # Log forces and the updated geometry; the context manager
        # guarantees the file is closed even if a write fails.
        with open('out.txt', 'a') as output:
            for j in range(1, len(dX)):
                output.write("{: 12.8e}".format(dX[j]))
                output.write("\t \t")
                output.write("{: 12.8e}".format(dY[j]))
                output.write("\t \t")
                output.write("{: 12.8e}".format(dZ[j]))
                output.write('\n')
            output.write('\n \n')
            for j in range(1, len(input)):
                for k in range(0, 4):
                    output.write("{: 12.8e}".format(input[j, k]))
                    output.write("\t \t")
                output.write('\n')
            output.write('\n \n')
        # Converged when the largest force component is below tolerance.
        if np.max(np.abs(dX)) < GeoOptol and np.max(np.abs(dY)) < GeoOptol and np.max(np.abs(dZ)) < GeoOptol:
            break
    return input, results
def runGO(input, set, results):
    """Entry point: run the geometry optimization and pass results through."""
    return GeoOpt(input, set, results)
|
acuriel/Nixtla | nixtla/core/tools/pympi/Elan.py | Python | gpl-2.0 | 33,330 | 0.00003 | # -*- coding: utf-8 -*-
import time
import EafIO
import warnings
class Eaf:
"""Read and write Elan's Eaf files.
.. note:: All times are in milliseconds and can't have decimals.
:var dict annotation_document: Annotation document TAG entries.
:var dict licences: Licences included in the file.
:var dict header: XML header.
:var list media_descriptors: Linked files, where every file is of the
form: ``{attrib}``.
:var list properties: Properties, where every property is of the form:
``(value, {attrib})``.
:var list linked_file_descriptors: Secondary linked files, where every
linked file is of the form:
``{attrib}``.
:var dict timeslots: Timeslot data of the form:
``{TimslotID -> time(ms)}``.
:var dict tiers: Tier data of the form:
``{tier_name -> (aligned_annotations,
reference_annotations, attributes, ordinal)}``,
aligned_annotations of the form:
``[{annotation_id ->
(begin_ts, end_ts, value, svg_ref)}]``,
reference annotations of the form:
``[{annotation_id ->
(reference, value, previous, svg_ref)}]``.
:var list linguistic_types: Linguistic types, where every type is of the
form: ``{id -> attrib}``.
:var list locales: Locales, where every locale is of the form:
``{attrib}``.
:var dict constraints: Constraint data of the form:
| ``{stereotype -> description}``.
:var dict controlled_vocabularies: Controlled vocabulary data of the
form: ``{id ->
(descriptions, entries, ext_ref)}``,
descriptions of the form:
``[(lang_ref, text)]``,
| entries of the form:
``{id -> (values, ext_ref)}``,
values of the form:
``[(lang_ref, description, text)]``.
:var list external_refs: External references, where every reference is of
the form ``[id, type, value]``.
:var list lexicon_refs: Lexicon references, where every reference is of
the form: ``[{attribs}]``.
"""
    def __init__(self, file_path=None, author='pympi'):
        """Construct either a new Eaf file or read on from a file/stream.
        :param str file_path: Path to read from, - for stdin. If ``None`` an
                              empty Eaf file will be created.
        :param str author: Author of the file.
        """
        # Flags for naive (sequential) generation of annotation/timeslot ids.
        self.naive_gen_ann, self.naive_gen_ts = False, False
        # ANNOTATION_DOCUMENT root attributes, targeting the EAF 2.8 schema.
        self.annotation_document = {
            'AUTHOR': author,
            'DATE': time.strftime("%Y-%m-%dT%H:%M:%S%z"),
            'VERSION': '2.8',
            'FORMAT': '2.8',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:noNamespaceSchemaLocation':
                'http://www.mpi.nl/tools/elan/EAFv2.8.xsd'}
        self.constraints = {}
        self.controlled_vocabularies = {}
        self.header = {}
        self.licences = {}
        self.linguistic_types = {}
        self.tiers = {}
        self.timeslots = {}
        self.external_refs = []
        self.lexicon_refs = []
        self.linked_file_descriptors = []
        self.locales = []
        self.media_descriptors = []
        self.properties = []
        # Counters used to mint fresh timeslot and annotation ids.
        self.new_time, self.new_ann = 0, 0
        if file_path is None:
            # Fresh document: provide ELAN's default linguistic type, the
            # four standard constraints and an empty default tier so the
            # document is immediately usable.
            self.add_linguistic_type('default-lt', None)
            self.constraints = {'Time_Subdivision': 'Time subdivision of paren'
                                't annotation\'s time interval, no time gaps a'
                                'llowed within this interval',
                                'Symbolic_Subdivision': 'Symbolic subdivision '
                                'of a parent annotation. Annotations refering '
                                'to the same parent are ordered',
                                'Symbolic_Association': '1-1 association with '
                                'a parent annotation',
                                'Included_In': 'Time alignable annotations wit'
                                'hin the parent annotation\'s time interval, g'
                                'aps are allowed'}
            self.properties.append(('0', {'NAME': 'lastUsedAnnotation'}))
            self.add_tier('default')
        else:
            # Delegate parsing of an existing document to the EafIO module.
            EafIO.parse_eaf(file_path, self)
    def to_file(self, file_path, pretty=True):
        """Write the object to a file, if the file already exists a backup will
        be created with the ``.bak`` suffix.
        :param str file_path: Path to write to, - for stdout.
        :param bool pretty: Flag for pretty XML printing.
        """
        # All serialisation is delegated to the EafIO helper module.
        EafIO.to_eaf(file_path, self, pretty)
def to_textgrid(self, excluded_tiers=[], included_tiers=[]):
"""Convert the object to a :class:`pympi.Praat.TextGrid` object.
:param list excluded_tiers: Specifically exclude these tiers.
:param list included_tiers: Only include this tiers, when empty all are
included.
:returns: :class:`pympi.Praat.TextGrid` object
:raises ImportError: If the pympi.Praat module can't be loaded.
"""
from Praat import TextGrid
tgout = TextGrid()
tiers = [a for a in self.tiers if a not in excluded_tiers]
if included_tiers:
tiers = [a for a in tiers if a in included_tiers]
for tier in tiers:
currentTier = tgout.add_tier(tier)
for interval in self.get_annotation_data_for_tier(tier):
if interval[0] == interval[1]:
continue
currentTier.add_interval(interval[0]/1000.0,
interval[1]/1000.0, interval[2])
return tgout
    def extract(self, start, end):
        """Extracts the selected time frame as a new object.
        :param int start: Start time.
        :param int end: End time.
        :returns: The extracted frame in a new object.
        """
        from copy import deepcopy
        # Work on a deep copy so the original annotation data is untouched.
        eaf_out = deepcopy(self)
        for tier in eaf_out.tiers.itervalues():
            rems = []
            # Collect every aligned annotation not fully inside [start, end]...
            for ann in tier[0]:
                if eaf_out.timeslots[tier[0][ann][1]] > end or\
                        eaf_out.timeslots[tier[0][ann][0]] < start:
                    rems.append(ann)
            # ...then delete them (a dict cannot be mutated while iterated).
            for r in rems:
                del tier[0][r]
        # NOTE(review): only aligned annotations (tier[0]) are filtered;
        # reference annotations are kept as-is -- confirm this is intended.
        return eaf_out
    def get_linked_files(self):
        """Give all linked files."""
        # The media descriptors hold the primary linked media files.
        return self.media_descriptors
def add_linked_file(self, file_path, relpath=None, mimetype=None,
time_origin=None, ex_from=None):
"""Add a linked file.
:param str file_path: Path of the file.
:param str relpath: Relative path of the file.
:param str mimetype: Mimetype of the file, if ``None`` it tries to
guess it according to the file extension which
currently only works for wav, mpg, mpeg and xml.
:param int time_origin: Time origin for the media file.
:param str ex_from: Extracted from field.
:raises KeyError: If mimetype had to be guessed and a non standard
extension or an unknown mimetype.
"""
if mimetype is None:
mimes = {'wav': 'audio/x-wav', 'mpg': 'video/mpeg',
'mpeg': 'video/mpg', 'xml': 'text/xml'}
mimetype = mimes[file_path.split('.')[-1]]
self.media_descriptors.append({
'MEDIA_URL': file_path, 'RELATIVE_MEDIA_URL': relpath,
'MIME_TYPE': mimetype, 'TIME_ORIGIN': t |
canturkisci/agentless-system-crawler | tests/unit/test_plugins.py | Python | apache-2.0 | 72,595 | 0.00135 | import types
import unittest
from collections import namedtuple
import os
import sys
import tempfile
from zipfile import ZipFile, ZipInfo
from utils import jar_utils
sys.path.append('tests/unit/')
import mock
from plugins.systems.config_container_crawler import ConfigContainerCrawler
from plugins.systems.config_host_crawler import ConfigHostCrawler
from plugins.systems.connection_container_crawler import ConnectionContainerCrawler
from plugins.systems.connection_host_crawler import ConnectionHostCrawler
from plugins.systems.connection_vm_crawler import ConnectionVmCrawler
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.disk_container_crawler import DiskContainerCrawler
from plugins.systems.disk_host_crawler import DiskHostCrawler
from plugins.systems.dockerhistory_container_crawler import DockerhistoryContainerCrawler
from plugins.systems.dockerinspect_container_crawler import DockerinspectContainerCraw | ler
from plugins.systems.dockerps_host_crawler import DockerpsHostCrawler
from plugins.systems.file_container_crawler import FileContainerCrawler
from plugins.systems.file_host_crawler import FileHostCrawler
from plugins.systems.interface_container_crawler import InterfaceContainerCrawler
from plugins.systems.interface_host_crawler import InterfaceHostCrawler
from plugins.systems.interface_vm_crawler import InterfaceVmCrawler
from plugins.systems.jar_cont | ainer_crawler import JarContainerCrawler
from plugins.systems.jar_host_crawler import JarHostCrawler
from plugins.systems.load_container_crawler import LoadContainerCrawler
from plugins.systems.load_host_crawler import LoadHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.memory_vm_crawler import MemoryVmCrawler
from plugins.systems.metric_container_crawler import MetricContainerCrawler
from plugins.systems.metric_host_crawler import MetricHostCrawler
from plugins.systems.metric_vm_crawler import MetricVmCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.os_host_crawler import OSHostCrawler
from plugins.systems.os_vm_crawler import os_vm_crawler
from plugins.systems.package_container_crawler import PackageContainerCrawler
from plugins.systems.package_host_crawler import PackageHostCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
from plugins.systems.process_host_crawler import ProcessHostCrawler
from plugins.systems.process_vm_crawler import process_vm_crawler
from container import Container
from utils.crawler_exceptions import CrawlError
from utils.features import (
OSFeature,
ConfigFeature,
DiskFeature,
PackageFeature,
MemoryFeature,
CpuFeature,
InterfaceFeature,
LoadFeature,
DockerPSFeature,
JarFeature)
# for OUTVM psvmi
class DummyContainer(Container):
    """Minimal Container stub with a fixed pid and trivial cgroup paths."""

    def __init__(self, long_id):
        self.pid = '1234'
        self.long_id = long_id

    def get_memory_cgroup_path(self, node):
        # Cgroup paths are simply '/cgroup/<node>' for test purposes.
        return '/cgroup/%s' % node

    def get_cpu_cgroup_path(self, node):
        return '/cgroup/%s' % node
# for OUTVM psvmi
psvmi_sysinfo = namedtuple('psvmi_sysinfo',
'''boottime ipaddr osdistro osname osplatform osrelease
ostype osversion memory_used memory_buffered
memory_cached memory_free''')
psvmi_memory = namedtuple(
'psvmi_memory',
'memory_used memory_buffered memory_cached memory_free')
psvmi_interface = namedtuple(
'psvmi_interface',
'ifname bytes_sent bytes_recv packets_sent packets_recv errout errin')
os_stat = namedtuple(
'os_stat',
'''st_mode st_gid st_uid st_atime st_ctime st_mtime st_size''')
def mocked_os_walk(root_dir):
    """Fake os.walk over a tiny tree: '/' holding file1-3 plus 'dir'/file4.

    Mirrors real os.walk pruning semantics: if the caller removes 'dir'
    from the yielded dirs list before resuming, the subdirectory is not
    descended into at all.
    """
    files = ['file1', 'file2', 'file3']
    dirs = ['dir']
    yield ('/', dirs, files)
    # simulate the os_walk behavior (if a dir is deleted, we don't walk it).
    # Fixed: the guard used to test for '/dir', which is never in dirs
    # (dirs holds the bare name 'dir'), so pruning was never honoured and
    # the second yield always emitted the stale root listing.
    if 'dir' in dirs:
        files = ['file4']
        dirs = []
        yield ('/dir', dirs, files)
def mocked_os_walk_for_avoidsetns(root_dir):
    """Fake os.walk rooted at the host-side path '/1/2/3'.

    Same shape as mocked_os_walk, but the first yield reports the
    host-prefixed root used in avoid-setns mode.
    """
    files = ['file1', 'file2', 'file3']
    dirs = ['dir']
    yield ('/1/2/3', dirs, files)
    # simulate the os_walk behavior (if a dir is deleted, we don't walk it).
    # Fixed: the guard used to test for '/1/2/3/dir', which is never in
    # dirs (dirs holds the bare name 'dir'), so pruning was never honoured.
    if 'dir' in dirs:
        files = ['file4']
        dirs = []
        yield ('/dir', dirs, files)
# XXX can't do self.count = for some reason
# Module-level counter backing MockedMemCgroupFile's iterator state;
# note it is never reset, so each instance can be iterated once only.
mcount = 0
class MockedMemCgroupFile(mock.Mock):
    """Fake memory cgroup stats file.

    readline() returns '2'; iterating yields the two 'total_*' lines.
    Uses the Python 2 iterator protocol (next, not __next__) with state
    in the module-level mcount counter.
    """
    def __init__(self):
        pass

    def readline(self):
        return '2'

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; iteration state lives in mcount.
        global mcount
        mcount += 1
        if mcount == 1:
            return 'total_cache 100'
        if mcount == 2:
            return 'total_active_file 200'
        else:
            raise StopIteration()
# XXX can't do self.count = for some reason
# Module-level counters backing MockedCpuCgroupFile: ccount drives the
# iterator, ccount2 drives readline(); neither is ever reset.
ccount = 0
ccount2 = 0
class MockedCpuCgroupFile(mock.Mock):
    """Fake cpu cgroup file.

    readline() returns '1e7' on the first call and '2e7' afterwards;
    iterating yields 'system 20' then 'user 20'.  Uses the Python 2
    iterator protocol with state in the module-level ccount/ccount2.
    """
    def __init__(self):
        pass

    def readline(self):
        # First call returns 1e7, every later call 2e7 (global ccount2).
        global ccount2
        ccount2 += 1
        if ccount2 == 1:
            return '1e7'
        else:
            return '2e7'

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; iteration state lives in ccount.
        global ccount
        ccount += 1
        if ccount == 1:
            return 'system 20'
        if ccount == 2:
            return 'user 20'
        else:
            raise StopIteration()
class MockedFile(mock.Mock):
    """Fake file object whose read() always returns the string 'content'."""
    def __init__(self):
        pass

    def read(self):
        return 'content'
def mocked_codecs_open(filename, mode, encoding, errors):
    # Context-manager stand-in for codecs.open(): entering the 'with'
    # yields a MockedFile, exiting propagates exceptions (returns False).
    m = mock.Mock()
    m.__enter__ = mock.Mock(return_value=MockedFile())
    m.__exit__ = mock.Mock(return_value=False)
    return m
def mocked_cpu_cgroup_open(filename, mode):
    # Context-manager stand-in for open() on a cpu cgroup file.
    m = mock.Mock()
    m.__enter__ = mock.Mock(return_value=MockedCpuCgroupFile())
    m.__exit__ = mock.Mock(return_value=False)
    # Python 2 print statement (this test module targets Python 2).
    print filename
    return m
def mocked_memory_cgroup_open(filename, mode):
    # Context-manager stand-in for open() on a memory cgroup file.
    m = mock.Mock()
    m.__enter__ = mock.Mock(return_value=MockedMemCgroupFile())
    m.__exit__ = mock.Mock(return_value=False)
    # Python 2 print statement (this test module targets Python 2).
    print filename
    return m
# Lightweight stand-ins for the psutil result types used by the mocks below.
partition = namedtuple('partition', 'device fstype mountpoint opts')
pdiskusage = namedtuple('pdiskusage', 'percent total')
meminfo = namedtuple('meminfo', 'rss vms')
ioinfo = namedtuple('ioinfo', 'read_bytes write_bytes')
psutils_memory = namedtuple('psutils_memory', 'used free buffers cached')
psutils_cpu = namedtuple(
    'psutils_cpu',
    'idle nice user iowait system irq steal')
psutils_net = namedtuple(
    'psutils_net',
    'bytes_sent bytes_recv packets_sent packets_recv errout errin')


def mocked_disk_partitions(all):
    # Two fixed partitions, regardless of the 'all' flag.
    return [partition('/dev/a', 'type', '/a', 'opts'),
            partition('/dev/b', 'type', '/b', 'opts')]
class Connection():
def __init__(self):
self.laddr = ['1.1.1.1', '22']
self.raddr = ['2.2.2.2', '22']
self.status = 'Established'
class Process():
def __init__(self, name):
self.name = name
self.cmdline = ['cmd']
self.pid = 123
self.status = 'Running'
self.cwd = '/bin'
self.ppid = 1
self.create_time = 1000
def num_threads(self):
return 1
def username(self):
return 'don quijote'
def get_open_files(self):
return []
def get_connections(self):
return [Connection()]
def get_memory_info(self):
return meminfo(10, 20)
def get_io_counters(self):
return ioinfo(10, 20)
def get_cpu_percent(self, interval):
return 30
def get_memory_percent(self):
return 30
STAT_DIR_MODE = 16749
def mocked_os_lstat(path):
print path
if path == '/':
return os_stat(STAT_DIR_MODE, 2, 3, 4, 5, 6, 7)
elif path == '/file1':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file2':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/file3':
return os_stat(1, 2, 3, 4, 5, 6, 7)
elif path == '/dir':
return os_stat(STAT_DIR_MODE, 2, 3, |
Pharylon/PiClock | clock.py | Python | mit | 1,209 | 0.013234 | import datetime
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
#GPIO.cleanup()
ypins = [17, 18, 27, 22, 23, 24, 25]
xpins = [5, 6, 12]
def | setArray(myInt, array):
asBinary = "{0:b}".format(myInt).zfill(7)
for i in range(0, 7):
if (asBinary[i] == "0"):
array[i] = False
else:
array[i] = True
for i in xpins:
GPIO.setup(i, GPIO.IN)
#GPIO.output(i, False)
for i in ypins:
GPIO.setup(i, GPIO.IN)
#GPIO.output(i, False)
grid = [[0 for x in range(7)] for x in range(3)]
'''
GPIO.setup(17, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.out | put(17, False)
GPIO.output(5, True)
time.sleep(1)
'''
while True:
now = datetime.datetime.now()
setArray(now.hour, grid[0])
setArray(now.minute, grid[1])
setArray(now.second, grid[2])
for i in range(0, 7):
for j in range(0, 3):
if (grid[j][i]):
GPIO.setup(xpins[j], GPIO.OUT)
GPIO.setup(ypins[i], GPIO.OUT)
GPIO.output(xpins[j], True)
GPIO.output(ypins[i], False)
GPIO.setup(xpins[j], GPIO.IN)
GPIO.setup(ypins[i], GPIO.IN)
GPIO.cleanup()
|
maxim5/hyper-engine | hyperengine/tests/named_dict_test.py | Python | apache-2.0 | 3,262 | 0.007357 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import six
import unittest
import numpy as np
import hyperengine as hype
from hyperengine.base import NamedDict
class NamedDictTest(unittest.TestCase):
def test_embedded_dict(self):
spec = hype.spec.new({
'foo': {
'bar': {
'baz': 999
},
'baz': []
}
})
instance = self._instantiate(spec)
self.assertEqual(repr(instance), "{'foo': {'bar': {'baz': 999}, 'baz': []}}")
six.assertCountEqual(self, instance.foo.keys(), ['bar', 'baz'])
self.assertTrue('foo' in instance)
self.assertTrue('bar' in instance.foo)
self.assertTrue('baz' in instance.foo.bar)
self.assertEqual(type(instance), NamedDict)
self.assertEqual(type(instance.foo), NamedDict)
self.assertEqual(instance.foo.bar.baz, 999)
self.assertEqual(instance.foo.baz, [])
def test_embedded_list(self):
spec = hype.spec.new(
value = [[1], [[2]], (3, 4), {'foo': 5}],
)
instance = self._instantiate(spec)
self.assertEqual(type(instance), NamedDict)
self.assertEqual(type(instance.value), list)
self.assertEqual(type(instance.value[3]), NamedDict)
self.assertEqual(repr(instance), "{'value': [[1], [[2]], [3, 4], {'foo': 5}]}")
self.assertEqual(instance.value[3].foo, 5)
def test_dict_inside_list(self):
spec = hype.spec.new(
foo = [
{ 'bar': 0 },
{ 'bar': 1 },
{ 'bar': 2 },
]
)
instance = self._instantiate(spec)
self.assertEqual(type(instance), NamedDict)
self.assertEqual(type(instance.foo), list)
self.assertEqual(type(instance.foo[0]), NamedDict)
self.assertEqual(instance.foo[0].bar, 0)
self.assertEqual(instance.foo[1].bar, 1)
self.assertEqual(instance.foo[2].bar, 2)
def test_real(self):
hyper_params_spec = hype.spec.new(
learning_rate=10 ** hype.spec.uniform(-2, -3),
conv=hype.spec.new(
filters=[hype.spec.choice([20, 32, 48]), hype.spec.choice([64, 96, 128])],
residual=hype.spec.random_bit(),
),
dropout=hype.spec.uniform(0.5, 0.9),
)
instance = self._instantiate(hyper_params_spec)
self.assertEqual(repr(instance),
"{'conv': {'filters': [20, 64], 'residual': 0}, 'dropout': 0.500000, 'learning_rate': 0.001000}")
six.assertCountEqual(self, instance.keys(), ['learning_rate', 'conv', 'dropout'])
self.assertEqual(type(instance), NamedDict)
self.assertEqual(type(instance.conv), NamedDict)
self.assertEqual(instance.learning_rate, 0.001)
self.assertEqual(instance['learning_rate'], 0.001)
self.assertEqual(instance.get('le | arning_rate'), 0.001)
self.assertEqua | l(instance.foo, None)
self.assertEqual(instance.get('foo'), None)
self.assertEqual(instance.get('foo', 'bar'), 'bar')
six.assertCountEqual(self, instance.conv.keys(), ['filters', 'residual'])
self.assertEqual(instance.conv.filters, [20, 64])
self.assertEqual(instance.conv.filters[0], 20)
self.assertEqual(instance.conv.filters[1], 64)
def _instantiate(self, spec):
parsed = hype.spec.ParsedSpec(spec)
points = np.zeros([parsed.size()])
instance = parsed.instantiate(points)
return instance
|
cgwalters/anaconda | pyanaconda/regexes.py | Python | gpl-2.0 | 7,533 | 0.00292 | #
# regexes.py: anaconda regular expressions
#
# Copyright (C) 2013 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): David Shea <dshea@redhat.com>
#
import re
# Validation expressions
# The full name field can contain anything except a colon.
# The empty string allowed.
GECOS_VALID = re.compile(r'^[^:]*$')
# Everyone has different ideas for what can go in a username. Here's ours:
# POSIX recommends that user and group names use only the characters within
# the portable filesystem character set (ASCII alnum plus dot, underscore,
# and hyphen), with the additional restriction that names not start with a
# hyphen. The Red Hat modification to shadow-utils starts with these rules
# and additionally allows a final $, because Samba.
#
# shadow-utils also defines length limits for names: 32 for group names,
# and UT_NAMESIZE for user names (which is defined as 32 bits/utmp.h). This
# expression captures all of that: the initial character, followed by either
# up to 30 portable characters and a do | llar sign or up to 31 portable characters,
# both for a maximum total of 32. The empty string is not allowed. "root" is not
# allowed.
# a base expression without anchors, helpful for building other expres | sions
# If the string is the right length to match "root", use a lookback expression
# to make sure it isn't.
_USERNAME_BASE = r'[a-zA-Z0-9._](([a-zA-Z0-9._-]{0,2})|([a-zA-Z0-9._-]{3}(?<!root))|([a-zA-Z0-9._-]{4,31})|([a-zA-Z0-9._-]{,30}\$))'
USERNAME_VALID = re.compile(r'^' + _USERNAME_BASE + '$')
GROUPNAME_VALID = USERNAME_VALID
# A comma-separated list of groups, validated as in GROUPNAME_VALID
# Any number of spaces are allowed at the start and end of the list and
# before and after the commas. The empty string is allowed.
GROUPLIST_SIMPLE_VALID = re.compile(r'^\s*(' + _USERNAME_BASE + r'(\s*,\s*' + _USERNAME_BASE + r')*)?\s*$')
# Parse the <gr-name> (<gid>) strings in the group list.
#
# The name match is non-greedy so that it doesn't match the whitespace betweeen
# the name and ID.
#
# There's some non-capturing groups ("clusters" in the perlre parlance) thrown
# in there, and, haha, wow, that's confusing to look at. There are two groups
# that actually end up in the match object, and they're named to try to make
# it a little easier: the first is "name", and the second is "gid".
#
# EVERY STRING IS MATCHED. This expression cannot be used for validation.
# If there is no GID, or the GID contains non-digits, everything except
# leading or trailing whitespace ends up in the name group. The result needs to
# be validated with GROUPNAME_VALID.
GROUPLIST_FANCY_PARSE = re.compile(r'^(?:\s*)(?P<name>.*?)\s*(?:\((?P<gid>\d+)\))?(?:\s*)$')
# IPv4 address without anchors
IPV4_PATTERN_WITHOUT_ANCHORS = r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
# IPv6 address without anchors
# Adapted from the IPv6address ABNF definition in RFC 3986, so it has all those
# IPv4 compatibility bits too. All groups are non-capturing to make it easy to
# use in an expression with groups and completely impossible to read
IPV6_PATTERN_WITHOUT_ANCHORS = r'(?:(?:(?:[0-9a-fA-F]{1,4}:){6})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:::(?:(?:[0-9a-fA-F]{1,4}:){5})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:[0-9a-fA-F]{1,4})?::(?:(?:[0-9a-fA-F]{1,4}:){4})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,1}(?:[0-9a-fA-F]{1,4}))?::(?:(?:[0-9a-fA-F]{1,4}:){3})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,2}(?:[0-9a-fA-F]{1,4}))?::(?:(?:[0-9a-fA-F]{1,4}:){2})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,3}(?:[0-9a-fA-F]{1,4}))?::(?:(?:[0-9a-fA-F]{1,4}:){1})(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,4}(?:[0-9a-fA-F]{1,4}))?::(?:(?:(?:[0-9a-fA-F]{1,4}):(?:[0-9a-fA-F]{1,4}))|(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,5}(?:[0-9a-fA-F]{1,4}))?::(?:[0-9a-fA-F]{1,4}))|' + \
r'(?:(?:(?:[0-9a-fA-F]{1,4}:){,6}(?:[0-9a-fA-F]{1,4}))?::)'
# Hostname validation
# A hostname consists of sections separated by periods. Each of these sections
# must be between 1 and 63 characters, contain only alphanumeric characters or
# hyphens, and may not start or end with a hyphen. The whole string cannot start
# with a period, but it can end with one.
# This regex uses negative lookahead and lookback assertions to enforce the
# hyphen rules and make it way more confusing
HOSTNAME_PATTERN_WITHOUT_ANCHORS = r'(?:(?!-)[A-Za-z0-9-]{1,63}(?<!-)(?:\.(?!-)[A-Za-z0-9-]{1,63}(?<!-))*\.?)'
# URL Hostname
# This matches any hostname, IPv4 literal or properly encased IPv6 literal
# This does not match the "IPvFuture" form because come the hell on
URL_HOSTNAME_PATTERN_WITHOUT_ANCHORS = r'(?:' + IPV4_PATTERN_WITHOUT_ANCHORS + r')|(?:\[' + IPV6_PATTERN_WITHOUT_ANCHORS + r'])|(?:' + HOSTNAME_PATTERN_WITHOUT_ANCHORS + ')'
# Matches the "scheme" defined by RFC 3986
URL_SCHEME_PATTERN_WITHOUT_ANCHORS = r'[A-Za-z][A-Za-z0-9+.-]*'
# Matches any unreserved or percent-encoded character
URL_NORMAL_CHAR = r'[A-Za-z0-9._~-]|(?:%[0-9A-Fa-f]{2})'
# The above but also matches 'sub-delims' and :, @ and /
URL_PATH_CHAR = URL_NORMAL_CHAR + "|[!$&'()*+,;=:@/]"
# Parse a URL
# Parses a URL of the form [protocol://][username[:password]@]host[:port][path][?query][#fragment]
# into the following named groups:
# 1: protocol (e.g., http://)
# 2: username
# 3: password
# 4: host
# 5: port
# 6: path
# 7: query
# 8: fragment
URL_PARSE = re.compile(r'^(?P<protocol>' + URL_SCHEME_PATTERN_WITHOUT_ANCHORS + r'://)?' +
r'(?:(?P<username>(?:' + URL_NORMAL_CHAR + r')*)(?::(?P<password>(?:' + URL_NORMAL_CHAR + r')*))?@)?' +
r'(?P<host>' + URL_HOSTNAME_PATTERN_WITHOUT_ANCHORS + ')' +
r'(?::(?P<port>[0-9]+))?' +
r'(?P<path>/(?:' + URL_PATH_CHAR + r')*)?' +
r'(?:\?(?P<query>(?:' + URL_PATH_CHAR + r'|\?)*))?' +
r'(?:#(?P<fragment>(?:' + URL_PATH_CHAR + r'|\?)*))?$')
# Valid characters for repository names
REPO_NAME_VALID = re.compile(r'^[a-zA-Z0-9_.:-]+$')
# Product Version string, just the starting numbers like 21 or 21.1
VERSION_DIGITS = r'([\d.]+)'
|
nhenezi/pymapper | validator.py | Python | mit | 669 | 0.019432 | #! /usr/bin/python
from urlparse import urlparse
def fullPath(baseUrl, link):
# converts baseUrl string to ParseResult (urlparse object)
# for constructing simple links we can ignore everything after
# last slash, i.e. on http://test.com/super.ext?mag=ic
# relative links are constructed with http://test.com/ prefix
baseUrl = urlparse(baseUrl[:baseUrl.rfind('/') + 1])
if link.startswith('http'):
'''Full link'''
return link
if link.startswith('javascript'):
'''We are not interested in following these links'''
re | turn None
if link.starts | with('/'):
return "http://" + baseUrl.netloc + link
return baseUrl.geturl() + link
|
inducer/codery | pieces/admin.py | Python | mit | 1,253 | 0.002394 | from django.contrib import admin
from pieces.models import (
PieceTag,
Piece, Venue, Study, Keyword,
PieceToStudyAssociation)
# {{{ studies
class KeywordInline(admin.TabularInline):
model = Keyword
extra = 10
class StudyAdmin(admin.ModelAdmin):
list_display = ("id", "name", "start_date")
date_hierarchy = "start_date"
inlines = [KeywordInline]
admin.site.register(Study, StudyAdmin)
# }}}
# {{{ pieces
admin.site.register(PieceTag)
class PieceToStudyInline(admin.StackedInline):
model = PieceToStudyAssociation
extra = 2
class PieceAdmin(admin.ModelAdmin):
list_display = ("id", "title", "venue", "pub_date", "create_date")
list_filter = ("tags", "studies", "publication_type", "samples", "venue")
list_display_links = ("id", "title")
search_fields = ('title', 'con | tent', 'id')
date_hierarchy = "pub_date"
filter_horizontal = ("tags",)
save_on_top = True
inlines = [PieceToStudyInline]
admin.site.register(Piece, PieceAdmin)
# }}}
class VenueAdmin(admin.ModelAdmin):
list_display = ("id", "name")
list_display_links = ("id", "name")
search_fields = ("name", "id")
admin.site.register(Venue, VenueAdmin)
# vim: foldmet | hod=marker
|
Yalnix/BarryBot | config.py | Python | mpl-2.0 | 290 | 0.003448 | # Details used to log into Reddit.
reddit_client_id = ""
reddit_client_secret = ""
r | eddit_user = ""
reddit_pass = ""
# Auth key used to log into Discord.
discord_key = ""
# Command/feature modules.
module_names = (
"default",
)
# Do not c | hange this value!
config_version = 2
|
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_managed_disk_parameters.py | Python | mit | 1,259 | 0.000794 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------- | -----------------
from msrest.serialization import Model
class VirtualMachineScaleSetManagedDiskParameters(Model):
"""Describes the parameters of a ScaleSet managed disk.
:param storage_account_type: Specifies the storage account type for the
managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible
values include: 'Standard_LRS', 'Premium_LRS'
:type storage_account_type: str or
~azure.mgmt.compute.v2017_03_30.models.StorageAccountTypes
"""
_attrib | ute_map = {
'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountTypes'},
}
def __init__(self, storage_account_type=None):
super(VirtualMachineScaleSetManagedDiskParameters, self).__init__()
self.storage_account_type = storage_account_type
|
benagricola/exabgp | qa/self/operational/operational-send.py | Python | bsd-3-clause | 1,319 | 0.015163 | #!/usr/bin/env python
import os
import sys
import time
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 1
# sleep a little bit or we will never see the asm in the configuration file
# and the message | received just before we go to the established loop will be printed twice
time.sleep(1)
print 'announce operational rpcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational rpcp afi ipv4 safi unicast se | quence %d counter 200' % counter
time.sleep(1)
counter += 1
print 'announce operational apcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational apcp afi ipv4 safi unicast sequence %d counter 150' % counter
time.sleep(1)
counter += 1
print 'announce operational lpcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational lpcp afi ipv4 safi unicast sequence %d counter 250' % counter
time.sleep(1)
while True:
try:
time.sleep(1)
if counter % 2:
print 'announce operational adm afi ipv4 safi unicast advisory "this is dynamic message #%d"' % counter
sys.stdout.flush()
else:
print 'announce operational asm afi ipv4 safi unicast advisory "we SHOULD not send asm from the API"'
sys.stdout.flush()
counter += 1
except KeyboardInterrupt:
pass
except IOError:
break
|
jcdouet/pyDatalog | pyDatalog/examples/datalog.py | Python | lgpl-2.1 | 2,172 | 0.01151 | """
This file shows how to use pyDatalog using facts stored in datalog.
It has 3 parts:
1. create facts for 2 employees in the datalog engine
2. define business rules
3. Query the datalog engine
"""
from pyDatalog import pyDatalog
""" 1. create facts for 3 employees in the datalog engine """
pyDatalog.create_atoms('salary', 'manager')
# John is the manager of Mary, who is the manager of Sam
+ (salary['John'] == 6800)
+ (manager['Mary'] == 'John')
+ (salary['Mary'] == 6300)
+ (manager['Sam'] == 'Mary')
+ (salary['Sam'] == 5900)
""" 2. define business rules """
pyDatalog.create_atoms('salary_class', 'indirect_manager', 'report_count', 'budget', 'lowest',
'X', 'Y', 'Z', 'N')
# the salary class of employee X is computed as a function of his/her salary
salary_class[X] = salary[X]//1000
# all the indirect managers of employee X are derived from his manager, recursively
indirect_manager(X,Y) <= (manager[X] == Y) & (Y != None)
indirect_manager(X,Y) <= (manager[X] == Z) & indirect_manager(Z,Y) & (Y != None)
# count the number of reports of X
(report_count[X] == len_(Y)) <= indirect_manager(Y,X)
""" 3. Query the datalog engine """
# | what is the salary class of John ?
print(salary_class['John'] == Y) # Y is 6
# who has a salary of 6300 ?
print(salary[X] == 6300) # X is Mary
# who are the indirect managers of Mary ?
print(indirect_manager('Mary', X)) # X is John
# Who are the employees of John with a salary below 6000 ?
print((salary[X] < 6000) & indirect_manager(X, 'John')) # X is Sam
| # who is his own indirect manager ?
print(indirect_manager('X', X)) # prints []
# who has 2 reports ?
print(report_count[X] == 2) # X is John
# what is the total salary of the employees of John ?
(budget[X] == sum_(N, for_each=Y)) <= (indirect_manager(Y, X)) & (salary[Y]==N)
print(budget['John']==N) # N is 12200
# who has the lowest salary ?
(lowest[1] == min_(X, order_by=N)) <= (salary[X]==N)
print(lowest[1]==X) # X is Sam
# start the datalog console, for interactive querying
from pyDatalog.examples import console
console = console.datalogConsole(locals=locals())
console.interact('Type exit() when done.')
|
VitalPet/addons-onestein | account_activity_based_costing/tests/__init__.py | Python | agpl-3.0 | 189 | 0 | # - | *- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
f | rom . import test_activity_based_costing
|
zedshaw/learn-python3-thw-code | ex51/gothonweb/form_test.py | Python | mit | 379 | 0.005277 | from flask import Flask
fro | m flask import render_template
from flask import request
app = Flask(__name__)
@app.route("/")
def index():
name = request.args.get('name', 'Nobody')
if name:
greeting = f"Hello, {name}"
else:
greet | ing = "Hello World"
return render_template("index.html", greeting=greeting)
if __name__ == "__main__":
app.run()
|
meine-stadt-transparent/meine-stadt-transparent | mainapp/migrations/0025_auto_20190917_2038.py | Python | mit | 564 | 0 | # Generated by Django 2.1.11 on 2019-09-17 18:38
from django.db import migrations, models
class Migr | ation(migrations.Migration):
dependencies = [
('mainapp', '0024_merge_20190405_0941'),
]
operations = [
migrations.AlterField(
model_name='historicalmeeting',
name='cancelled',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='meeting',
name='cancelled',
field=models.BooleanField(default=False),
),
| ]
|
tux-00/ansible | lib/ansible/plugins/connection/chroot.py | Python | gpl-3.0 | 7,616 | 0.002495 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.basic import is_executable
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local chroot based connections '''
transport = 'chroot'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
# checksums (so copy, for instance, doesn't work right)
# Have to look into that before re-enabling this
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.chroot = self._play_context.remote_addr
if os.geteuid() != 0:
raise AnsibleError("chroot connection requires running as root")
# we're running as root on the local system so do some
# trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
raise AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
# Want to check for a usable bourne shell inside the chroot.
# is_executable() == True is sufficient. For symlinks it
# gets really complicated really fast. So we punt on finding that
# out. As long as it's a symlink we assume that it will work
if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
if not self.chroot_cmd:
raise AnsibleError("chroot command not found in PATH")
def _connect(self):
''' connect to the chroot; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the chroot. This | is only needed for implementing
put_file() get_file() so that we don't have to read the whole file
into memory.
compared to exec_command() it looses some niceties like being able to
return the process's exit code immediately.
'''
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
| local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def put_file(self, in_path, out_path):
''' transfer a file from local to chroot '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from chroot to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
|
metaperl/metaperl-proxy | myapp.py | Python | mit | 797 | 0.007528 | # imports
## core
import importlib
import logging
import os
import pprint
import sys
import StringIO
## 3rd party
import cherrypy
import requests
## local
def full_path(*extra):
return os.path.join(os.path.dirname(__file__), *extra)
sys.path.insert(0, full_path())
import db
logging.basicConfig()
sorry = 'This is only for US Citizens. Sorry and thank you for your time.'
class Ro | ot(object):
@cherrypy.expose
def index(self, tag):
redirect_url = db.urls[tag]
ip = | cherrypy.request.headers['Remote-Addr']
request_url = 'http://ipinfo.io/{0}/country'.format(ip)
r = requests.get(request_url)
country = r.text.strip()
if country == 'US':
raise cherrypy.HTTPRedirect(redirect_url)
else:
return sorry
|
emillynge/pyangular | backend/googleauth.py | Python | apache-2.0 | 12,555 | 0.000956 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google ID Token helpers."""
import asyncio
import json
import urllib.parse
from datetime import timedelta, datetime
from http import HTTPStatus
from typing import NamedTuple, Awaitable
import time
import aiohttp
import u | vloop
from aiohttp import ClientSession, ClientResponse
from google.auth import exceptions, _helpers
from google.auth import jwt
from google.oauth2 import service_account
from google.oauth2._client import _JWT_GRANT_TYPE, _URLENCODED_CONTENT_TYPE, _handle_error_res | ponse, _parse_expiry, \
_REFRESH_GRANT_TYPE
from google.oauth2.service_account import Credentials as _Credentials, _DEFAULT_TOKEN_LIFETIME_SECS
# The URL that provides public certificates for verifying ID tokens issued
# by Google's OAuth 2.0 authorization server.
_GOOGLE_OAUTH2_CERTS_URL = 'https://www.googleapis.com/oauth2/v1/certs'
# The URL that provides public certificates for verifying ID tokens issued
# by Firebase and the Google APIs infrastructure
_GOOGLE_APIS_CERTS_URL = (
'https://www.googleapis.com/robot/v1/metadata/x509'
'/securetoken@system.gserviceaccount.com')
CERTS_CACHE_TTL = timedelta(seconds=300)
CERTS_CACHE = dict()
async def _fetch_certs(session: ClientSession, certs_url):
"""Fetches certificates.
If non-expired certificates exists in CERT_CACHE these are returned.
Otherwise new certs are fetched from certs_url and placed into cache.
Google-style cerificate endpoints return JSON in the format of
``{'key id': 'x509 certificate'}``.
Args:
session (aiohhtp.Session): The object used to make
HTTP requests.
certs_url (str): The certificate endpoint URL.
Returns:
Mapping[str, str]: A mapping of public key ID to x.509 certificate
data.
"""
try:
certs, expiry = CERTS_CACHE[certs_url]
except KeyError:
pass
else:
if datetime.now() > expiry:
del CERTS_CACHE[certs_url]
else:
return certs
async with session.get(certs_url) as response:
# data = await resp.json()
if response.status != HTTPStatus.OK:
raise exceptions.TransportError(
'Could not fetch certificates at {}'.format(certs_url))
certs = await response.json()
CERTS_CACHE[certs_url] = (certs, datetime.now() + CERTS_CACHE_TTL)
return certs
async def verify_token(id_token, session: ClientSession, audience=None,
certs_url=_GOOGLE_OAUTH2_CERTS_URL):
"""Verifies an ID token and returns the decoded token.
Args:
id_token (Union[str, bytes]): The encoded token.
session (aiohhtp.Session): The object used to make
HTTP requests.
audience (str): The audience that this token is intended for. If None
then the audience is not verified.
certs_url (str): The URL that specifies the certificates to use to
verify the token. This URL should return JSON in the format of
``{'key id': 'x509 certificate'}``.
Returns:
Mapping[str, Any]: The decoded token.
"""
certs = await _fetch_certs(session, certs_url)
return jwt.decode(id_token, certs=certs, audience=audience)
async def verify_oauth2_token(id_token, session: ClientSession, audience=None):
    """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.

    Previously a plain function that returned the un-awaited coroutine from
    ``verify_token`` while its docstring promised the decoded token; it is now
    a proper coroutine. Callers that did ``await verify_oauth2_token(...)``
    behave identically.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        session (aiohttp.ClientSession): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. This is
            typically your application's OAuth 2.0 client ID. If None then the
            audience is not verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    return await verify_token(
        id_token, session, audience=audience,
        certs_url=_GOOGLE_OAUTH2_CERTS_URL)
class TokenInfo(NamedTuple):
    """Normalized fields of a Google OAuth2 ``tokeninfo`` response.

    Built by ``verify_oauth2_token_simple`` after renaming the endpoint's
    ``exp``/``sub``/``access_type`` fields.
    """
    uid: str
    email: str
    expiry: int          # absolute expiry, seconds since the epoch
    email_verified: bool
    scope: str
    aud: str
    azp: str
    offline: bool        # True when access_type was "offline"

    @property
    def expires_in(self) -> float:
        # time.time() returns a float, so the difference is a float;
        # the previous ``-> int`` annotation was incorrect.
        # Negative once the token has already expired.
        return self.expiry - time.time()
async def verify_oauth2_token_simple(token, session: ClientSession, audience,
                                     scopes: list = None) -> TokenInfo:
    """Validate an access token against Google's ``tokeninfo`` endpoint.

    Fixes: the return annotation was ``Awaitable[TokenInfo]``, but an
    ``async def`` already wraps its declared return type in an awaitable;
    also removes an unused ``c = await resp.content.read()`` and a dead
    ``resp: ClientResponse`` annotation statement.

    Args:
        token (str): The OAuth 2.0 access token to check.
        session (aiohttp.ClientSession): The object used to make
            HTTP requests.
        audience (str): Expected ``aud`` claim (your OAuth 2.0 client ID).
        scopes (list): Optional scopes that must all be present on the token.

    Returns:
        TokenInfo: Normalized token metadata.

    Raises:
        ValueError: If the endpoint rejects the token, the audience does not
            match, or a requested scope is missing.
    """
    async with session.get('https://www.googleapis.com/oauth2/v3/tokeninfo',
                           params={'access_token': token}) as resp:
        if resp.status != HTTPStatus.OK:
            raise ValueError(resp.reason)
        data = await resp.json()
    if data['aud'] != audience:
        raise ValueError('Token was not issued for this application')
    if scopes and not all(scope in data['scope'] for scope in scopes):
        raise ValueError("Token does not provide requested scopes.")
    # Rename/retype endpoint fields to match the TokenInfo schema.
    data['expiry'] = int(data.pop('exp'))
    data['uid'] = data.pop('sub')
    data['email_verified'] = data.pop('email_verified') == 'true'
    data['offline'] = data.pop('access_type') == "offline"
    data.pop('expires_in')
    return TokenInfo(**data)
def verify_firebase_token(id_token, session: ClientSession, audience=None):
    """Verifies an ID Token issued by Firebase Authentication.

    Note: this returns the coroutine produced by ``verify_token``; callers
    must ``await`` the result.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        session (aiohttp.ClientSession): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. This is
            typically your Firebase application ID. If None then the audience
            is not verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    return verify_token(
        id_token, session, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)
async def _token_endpoint_request(session: ClientSession, token_uri, body):
    """Makes a request to the OAuth 2.0 authorization server's token endpoint.

    Args:
        session (aiohttp.ClientSession): The session used to make
            HTTP requests (the old docstring documented a ``request``
            parameter that does not exist).
        token_uri (str): The OAuth 2.0 authorizations server's token endpoint
            URI.
        body (Mapping[str, str]): The parameters to send in the request body.

    Returns:
        Mapping[str, str]: The JSON-decoded response data.

    Raises:
        google.auth.exceptions.RefreshError: If the token endpoint returned
            an error.
    """
    body = urllib.parse.urlencode(body)
    headers = {
        'content-type': _URLENCODED_CONTENT_TYPE,
    }
    async with session.post(url=token_uri, headers=headers, data=body) as response:
        response_body = await response.content.read()
        if response.status != HTTPStatus.OK:
            # Parses the error body and raises RefreshError with the details.
            _handle_error_response(response_body)
        response_data = json.loads(response_body)
        return response_data
async def jwt_grant(session: ClientSession, token_uri, assertion):
"""Implements the JWT Profile for OAuth 2.0 Authorization Grants.
For more details, see `rfc7523 section 4`_.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
token_uri (str): The OAuth 2.0 authorizations server's token endpoint
URI.
assertion (str): The OAuth 2.0 assertion.
Returns:
Tuple[str, Optional[datetime], Mapping[str, str]]: The access token,
expiration, and additional data returned by the token endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
.. _rfc7523 section 4: ht |
JackDanger/sentry | src/sentry/south_migrations/0029_auto__del_field_projectmember_is_superuser__del_field_projectmember_pe.py | Python | bsd-3-clause | 12,133 | 0.008077 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Collapse ProjectMember's is_superuser/permissions columns into a
        single integer 'type' column (default 0)."""
        # Deleting field 'ProjectMember.is_superuser'
        db.delete_column('sentry_projectmember', 'is_superuser')

        # Deleting field 'ProjectMember.permissions'
        db.delete_column('sentry_projectmember', 'permissions')

        # Adding field 'ProjectMember.type'
        db.add_column('sentry_projectmember', 'type', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
    def backwards(self, orm):
        """Partially reverse: is_superuser can be restored, but the dropped
        'permissions' values cannot, so this migration is irreversible."""
        # Adding field 'ProjectMember.is_superuser'
        db.add_column('sentry_projectmember', 'is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

        # User chose to not deal with backwards NULL issues for 'ProjectMember.permissions'
        raise RuntimeError("Cannot reverse this migration. 'ProjectMember.permissions' and its values cannot be restored.")

        # NOTE(review): the line below is unreachable because of the raise
        # above; kept as-is since it is part of the original migration.
        # Deleting field 'ProjectMember.type'
        db.delete_column('sentry_projectmember', 'type')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('djang | o.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_ind | ex': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', |
misaksen/umediaproxy | mediaproxy/interfaces/__init__.py | Python | gpl-2.0 | 113 | 0.00885 | # Copyright (C) 2008 AG-Projects.
#
"""Interfaces between Mediaproxy a | nd the other compon | ents in the system"""
|
# Build script for the cffi 'errortext' extension used by the pixelfly driver.
# Defect fixed: extraction artifacts had corrupted two tokens
# ('_ | pixelfly' inside the module-name string and 'els | e:').
import sys
import os.path
import setuptools  # Fix distutils issues
from cffi import FFI

ffi = FFI()
mod_name = 'instrumental.drivers.cameras._pixelfly.errortext'

if sys.platform.startswith('win'):
    # On Windows, compile PCO's error-text helper from the vendored header.
    ffi.set_source(mod_name, """
        #define PCO_ERR_H_CREATE_OBJECT
        #define PCO_ERRT_H_CREATE_OBJECT
        #include <windows.h>
        #include "PCO_errt.h"
    """, include_dirs=[os.path.dirname(__file__)])
    ffi.cdef("void PCO_GetErrorText(DWORD dwerr, char* pbuf, DWORD dwlen);")
else:
    # Non-Windows builds get an empty extension so imports still succeed.
    ffi.set_source(mod_name, '')

if __name__ == '__main__':
    ffi.compile()
|
# Fixture data for SoftLayer_Virtual_Guest_Block_Device_Template_Group tests.
# Defect fixed: extraction artifacts had corrupted two values
# ('2013-12-05T2 | 1:53:03-06:00' and 'EB3841 | 4C-...'); reconstructed from
# the identical, uncorrupted values elsewhere in this module.
IMAGES = [{
    'accountId': 1234,
    'blockDevices': [],
    'createDate': '2013-12-05T21:53:03-06:00',
    'globalIdentifier': '0B5DEAF4-643D-46CA-A695-CECBE8832C9D',
    'id': 100,
    'name': 'test_image',
    'parentId': '',
    'publicFlag': True,
}, {
    'accountId': 1234,
    'blockDevices': [],
    'createDate': '2013-12-05T21:53:03-06:00',
    'globalIdentifier': 'EB38414C-2AB3-47F3-BBBD-56A5F689620B',
    'id': 101,
    'name': 'test_image2',
    'parentId': '',
    'publicFlag': True,
}]

# Canned return values for the mocked API methods.
getObject = IMAGES[0]
getPublicImages = IMAGES
deleteObject = {}
editObject = True
setTags = True
createFromExternalSource = [{
    'createDate': '2013-12-05T21:53:03-06:00',
    'globalIdentifier': '0B5DEAF4-643D-46CA-A695-CECBE8832C9D',
    'id': 100,
    'name': 'test_image',
}]
copyToExternalSource = True
wfxiang08/ansible | lib/ansible/executor/playbook_executor.py | Python | gpl-3.0 | 12,206 | 0.004424 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import locale
import signal
import sys
from ansible import constants as C
from ansible.errors import *
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.plugins import module_loader
from ansible.template import Templar
from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
    def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
        """Store the run-wide collaborators and create the TaskQueueManager.

        No TaskQueueManager is created for listing or syntax-check
        invocations (listhosts/listtasks/listtags/syntax), since no tasks
        will actually be executed in those modes.
        """
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self.passwords = passwords

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
signal.signal(signal.SIGINT, self._cleanup)
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.dirname(playbook_path))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
i = 1
plays = pb.get_plays()
self._display.vv('%d plays in %s' % (len(plays), playbook_path))
for play in plays:
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
| if 'name' not in var:
raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds)
vname = var['name']
| prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in play.vars:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
pname = new_play.get_name().strip()
if pname == 'PLAY: <no name specified>':
pname = 'PLAY: #%d' % i
p = { 'name': pname }
if self._options.listhosts:
p['pattern']=play.hosts
p['hosts']=set(self._inventory.get_hosts(new_play.hosts))
#TODO: play tasks are really blocks, need to figure out how to get task objects from them
elif self._options.listtasks:
p['tasks'] = []
for task in play.get_tasks():
p['tasks'].append(task)
#p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
elif self._options.listtags:
p['tags'] = set(new_play.tags)
for task in play.get_tasks():
p['tags'].update(task)
#p['tags'].update(task.tags)
entry['plays'].append(p)
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
# we are actually running plays
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
break
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# if the last result wasn't zero, break out of the serial batch loop
if result != 0:
break
# if the last result wasn't zero, break out of the play loop
if result != 0:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._cleanup()
if self._options.syntax:
self.display.display("No issues encountered")
return result
# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
self._display.banner("PLAY RECAP")
|
kidchang/compassv2-api | compass/db/api/database.py | Python | apache-2.0 | 9,312 | 0.000537 | # Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provider interface to manipulate database."""
import logging
import netaddr
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from threading import local
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
# Module-level database state, populated by init().
ENGINE = None
SESSION = sessionmaker(autocommit=False, autoflush=False)
SCOPED_SESSION = None
# Thread-local holder; session() stores the active session here so that
# in_session()/current_session() can find it.
SESSION_HOLDER = local()
def init(database_url):
    """Initialize database.

    Creates the global engine/scoped-session pair and attaches a query
    property to the declarative base so models can be queried directly.

    :param database_url: string, database url.
    """
    global ENGINE
    global SCOPED_SESSION
    # convert_unicode ensures string results come back as unicode objects.
    ENGINE = create_engine(database_url, convert_unicode=True)
    SESSION.configure(bind=ENGINE)
    SCOPED_SESSION = scoped_session(SESSION)
    models.BASE.query = SCOPED_SESSION.query_property()
init(setting.SQLALCHEMY_DATABASE_URI)
def in_session():
    """Return True if the caller is inside a database session scope.

    The session() context manager stores the active session on the
    thread-local SESSION_HOLDER; its presence is the scope marker.
    """
    # Fixes a corrupted token in the original ('Fals | e') and replaces the
    # verbose if/else-return-True/False with a direct boolean expression.
    return hasattr(SESSION_HOLDER, 'session')
@contextmanager
def session():
    """Database session scope.

    Commits on success; on failure rolls back, logs, and re-raises
    (wrapping non-DatabaseException errors). The thread-local session
    holder is always cleaned up, and nested scopes are rejected.

    .. note::
        To operate database, it should be called in database session.
    """
    # Fixes a corrupted token in the original ('has | attr') and removes an
    # unused function-local 'import traceback'.
    if hasattr(SESSION_HOLDER, 'session'):
        logging.error('we are already in session')
        raise exception.DatabaseException('session already exist')
    new_session = SCOPED_SESSION()
    setattr(SESSION_HOLDER, 'session', new_session)
    try:
        yield new_session
        new_session.commit()
    except Exception as error:
        new_session.rollback()
        logging.error('failed to commit session')
        logging.exception(error)
        if isinstance(error, exception.DatabaseException):
            raise error
        raise exception.DatabaseException(str(error))
    finally:
        new_session.close()
        SCOPED_SESSION.remove()
        delattr(SESSION_HOLDER, 'session')
def current_session():
    """Get the current session scope when it is called.

    :return: database session.
    :raises DatabaseException: when called outside a ``session()`` scope
        (the AttributeError from the missing thread-local is wrapped).
    """
    try:
        return SESSION_HOLDER.session
    except Exception as error:
        logging.error('It is not in the session scope')
        logging.exception(error)
        if isinstance(error, exception.DatabaseException):
            raise error
        else:
            raise exception.DatabaseException(str(error))
def _setup_user_table(user_session):
    """Seed the default admin user (from settings) into the user table."""
    logging.info('setup user table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import user
    user.add_user_internal(
        user_session,
        setting.COMPASS_ADMIN_EMAIL,
        setting.COMPASS_ADMIN_PASSWORD,
        is_admin=True
    )
def _setup_permission_table(permission_session):
    """Initialize permission table."""
    logging.info('setup permission table.')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import permission
    permission.add_permissions_internal(
        permission_session
    )
def _setup_switch_table(switch_session):
    """Seed the default switch, keyed by its IP as an integer."""
    logging.info('setup switch table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import switch
    # NOTE(review): ``long`` is Python 2 only; this module targets py2.
    switch.add_switch_internal(
        switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP))
    )
def _setup_os_installers(installer_session):
    """Initialize os_installer table."""
    logging.info('setup os installer table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import installer
    installer.add_os_installers_internal(
        installer_session
    )
def _setup_package_installers(installer_session):
    """Initialize package_installer table."""
    logging.info('setup package installer table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import installer
    installer.add_package_installers_internal(
        installer_session
    )
def _setup_oses(os_session):
    """Initialize os table."""
    logging.info('setup os table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_oses_internal(
        os_session
    )
def _setup_distributed_systems(distributed_system_session):
    """Initialize distributed system table."""
    logging.info('setup distributed system table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_distributed_systems_internal(
        distributed_system_session
    )
def _setup_os_adapters(adapter_session):
    """Initialize os adapter table."""
    logging.info('setup os adapter table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_os_adapters_internal(
        adapter_session)
def _setup_package_adapters(adapter_session):
    """Initialize package adapter table."""
    logging.info('setup package adapter table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_package_adapters_internal(
        adapter_session)
def _setup_adapters(adapter_session):
    """Initialize adapter table."""
    logging.info('setup adapter table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_adapters_internal(adapter_session)
def _setup_os_fields(field_session):
    """Initialize os field table."""
    logging.info('setup os field table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import metadata
    metadata.add_os_field_internal(field_session)
def _setup_package_fields(field_session):
    """Initialize package field table."""
    logging.info('setup package field table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import metadata
    metadata.add_package_field_internal(field_session)
def _setup_os_metadatas(metadata_session):
    """Initialize os metadata table."""
    logging.info('setup os metadata table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import metadata
    metadata.add_os_metadata_internal(metadata_session)
def _setup_package_metadatas(metadata_session):
    """Initialize package metadata table."""
    logging.info('setup package metadata table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import metadata
    metadata.add_package_metadata_internal(metadata_session)
def _setup_package_adapter_roles(role_session):
    """Initialize package adapter role table."""
    logging.info('setup package adapter role table')
    # Local import — presumably avoids a circular import at module load time.
    from compass.db.api import adapter
    adapter.add_roles_internal(role_session)
def create_db():
    """Create all tables and seed them with their initial data."""
    models.BASE.metadata.create_all(bind=ENGINE)
    # Seeders run in the original fixed order inside a single session;
    # some steps presumably depend on rows created by earlier ones.
    seeders = (
        _setup_permission_table,
        _setup_user_table,
        _setup_switch_table,
        _setup_os_installers,
        _setup_package_installers,
        _setup_oses,
        _setup_distributed_systems,
        _setup_os_adapters,
        _setup_package_adapters,
        _setup_package_adapter_roles,
        _setup_adapters,
        _setup_os_fields,
        _setup_package_fields,
        _setup_os_metadatas,
        _setup_package_metadatas,
    )
    with session() as my_session:
        for seed in seeders:
            seed(my_session)
def drop_db():
    """Drop database."""
    # Removes every table defined on the declarative base.
    models.BASE.metadata.drop_all(bind=ENGINE)
def create_table(table):
"""Create table.
:param table: Class of the Table defined in the model.
"""
table.__table__.create(bind=ENGINE, checkfirst=True)
with session() as my_session:
if table == models.User:
_setup_user_table(my_session)
elif table == models.Permission:
_setup_permission_table(my_session)
elif table == models.Switch:
_setup_switch_table(my_session)
elif table in [
models.OSInstaller,
models.PackageInstaller,
models.OperatingSystem,
models.DistributedSystems,
models.OSAdapter,
models.PackageAdapt |
almarklein/scikit-image | skimage/segmentation/tests/test_slic.py | Python | bsd-3-clause | 4,145 | 0.000241 | import itertools as it
import warnings
import numpy as np
from numpy.testing import assert_equal, assert_raises
from skimage.segmentation import slic
def test_color_2d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
seg = slic(img, n_segments=4, sigma=0)
# we expect 4 segments
assert_equal(len(np.unique(seg)), 4)
assert_equal(seg.shape, img.shape[:-1])
assert_equal(seg[:10, :10], 0)
assert_equal(seg[10:, :10], 2)
assert_equal(seg[:10, 10:], 1)
assert_equal(seg[10:, 10:], 3)
def test_gray_2d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, :10] = 0.33
img[10:, :10] = 0.67
img[10:, 10:] = 1.00
img += 0.0033 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4, compactness=1,
multichannel=False, convert2lab=False)
assert_equal(len(np.unique(seg)), 4)
assert_equal(seg.shape, img.shape)
assert_equal(seg[:10, :10], 0)
assert_equal(seg[10:, :10], 2)
assert_equal(seg[:10, 10:], 1)
assert_equal(seg[10:, 10:], 3)
def test_color_3d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 22, 3))
slices = []
for dim_size in img.shape[:-1]:
midpoint = dim_size // 2
slices.append((slice(None, midpoint), slice(midpoint, None)))
slices = list(it.product(*slices))
colors = list(it.product(*(([0, 1],) * 3)))
for s, c in zip(slices, colors):
img[s] = c
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=8)
assert_equal(len(np.unique(seg)), 8)
for s, c in zip(slices, range(8)):
assert_equal(seg[s], c)
def test_gray_3d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 22))
slices = []
for dim_size in img.shape:
midpoint = dim_size // 2
slices.append((slice(None, midpoint), slice(midpoint, None)))
slices = list(it.product(*slices))
shades = np.arange(0, 1.000001, 1.0/7)
for s, sh in zip(slices, shades):
img[s] = sh
img += 0.001 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=8, compactness=1,
multichannel=False, convert2lab=False)
assert_equal(len(np.unique(seg)), 8)
for s, c in zip(slices, range(8)):
assert_equal(seg[s], c)
def test_list_sigma():
rnd = np.random.RandomState(0)
img = np.array([[1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1]], np.float)
img += 0.1 * rnd.normal(size=img.shape)
result_sigma = np.array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]], np.int)
seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1], multichannel=False)
assert_equal(seg_sigma, result_sigma)
def test_spacing():
rnd = np.random.RandomState(0)
img = np.array([[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]], np.float)
result_non_spaced = np.array([[0, 0, 0, 1, 1],
| [0, 0, 1, 1, 1]], np.int)
result_spaced = np.array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]], np.int)
img += 0.1 * rnd.normal( | size=img.shape)
seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,
compactness=1.0)
seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 500, 1],
compactness=1.0, multichannel=False)
assert_equal(seg_non_spaced, result_non_spaced)
assert_equal(seg_spaced, result_spaced)
def test_invalid_lab_conversion():
img = np.array([[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]], np.float)
assert_raises(ValueError, slic, img, multichannel=True, convert2lab=True)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
flavoi/diventi | diventi/ebooks/migrations/0016_auto_20190505_1854.py | Python | apache-2.0 | 830 | 0.00241 | # Generated by Django 2.1.7 on 2019-05-05 16:5 | 4
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Makes book_product/chapter_book nullable with SET_NULL on delete.
    # Defect fixed: an extraction artifact had corrupted 'null | =True' in
    # the first AlterField; reconstructed from the parallel field below.

    dependencies = [
        ('ebooks', '0015_remove_book_category'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='book_product',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='books', to='products.Product', verbose_name='product'),
        ),
        migrations.AlterField(
            model_name='chapter',
            name='chapter_book',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='chapters', to='ebooks.Book', verbose_name='book'),
        ),
    ]
|
# URL routes for the users app; the names are referenced via reverse()
# and templates. Defect fixed: an extraction artifact had corrupted the
# first import ('from dja | ngo.urls import path').
from django.urls import path

import users.views

urlpatterns = [
    path("settings/", users.views.user_settings, name="settings"),
    path("reset_token/", users.views.reset_token, name="reset_token"),
    path("panel_hide/", users.views.panel_hide, name="hide_new_panel"),
]
|
ETegro/ETConf | giver/urls.py | Python | agpl-3.0 | 1,178 | 0.012733 | # ETConf -- web-based user-friendly computer hardware configurator
# Copyright (C) 2010-2011 ETegro Technologies, PLC <http://etegro.com/>
# Sergey Matveev <sergey.matveev@etegro.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
urlpatterns = patterns( "configurator.giver.views",
( r"^perform/(?P<computermodel_alias>.+)/$", "perform" ),
( r"^configurator/(?P<computermodel_alias>.+)/$", "configurator" ),
( r"^computermodel/request/(?P<computermodel_alias>.+)$", "computermodel_request" ),
)
|
sajuptpm/murano | murano/dsl/exceptions.py | Python | apache-2.0 | 3,632 | 0 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for | the specific language governing permissions and limitations
# under the License.
class InternalFlowException(Exception):
pass
class ReturnException(InternalFlowException):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
class BreakException(InternalFlowException):
pass
class ContinueException(InternalFlowException):
pass
class DslInvalidOperationError(Exception):
pass
class NoMethodFound(Exception):
def __init__(self, name):
super(NoMethodFound, self).__init__('Method "%s" is not found' % name)
class NoClassFound(Exception):
def __init__(self, name):
super(NoClassFound, self).__init__('Class "%s" is not found' % name)
class NoPackageFound(Exception):
def __init__(self, name):
super(NoPackageFound, self).__init__(
'Package "%s" is not found' % name)
class NoPackageForClassFound(Exception):
def __init__(self, name):
super(NoPackageForClassFound, self).__init__('Package for class "%s" '
'is not found' % name)
class NoObjectFoundError(Exception):
def __init__(self, object_id):
super(NoObjectFoundError, self).__init__(
'Object "%s" is not found in object store' % object_id)
class AmbiguousMethodName(Exception):
def __init__(self, name):
super(AmbiguousMethodName, self).__init__(
'Found more that one method "%s"' % name)
class DslContractSyntaxError(Exception):
pass
class ContractViolationException(Exception):
pass
class ValueIsMissingError(Exception):
pass
class DslSyntaxError(Exception):
pass
class PropertyAccessError(Exception):
pass
class AmbiguousPropertyNameError(PropertyAccessError):
def __init__(self, name):
super(AmbiguousPropertyNameError, self).__init__(
'Found more that one property "%s"' % name)
class NoWriteAccess(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccess, self).__init__(
'Property "%s" is immutable to the caller' % name)
class NoWriteAccessError(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccessError, self).__init__(
'Property "%s" is immutable to the caller' % name)
class PropertyReadError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Property "%s" in class "%s" cannot be read' %
(name, murano_class.name))
class PropertyWriteError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Property "%s" in class "%s" cannot be written' %
(name, murano_class.name))
class UninitializedPropertyAccessError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Access to uninitialized property '
'"%s" in class "%s" is forbidden' % (name, murano_class.name))
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMtllightnovelCom.py | Python | bsd-3-clause | 635 | 0.029921 | def extractMtllightnovelCom(item):
'''
Parser for 'mtllightnovel.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
t | agmap = [
('devil\'s son-in-law', 'Devil\'s Son-in-Law', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in | tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
hail-is/hail | datasets/load/old/load.gtex_v7_transcript_tpm.GRCh38.liftover.py | Python | mit | 415 | 0.007229 |
import hail as hl
mt = hl.read_matrix_table('gs://hail-datasets/hail-data/gtex_v7_transcript_tpm.GRCh37.mt')
b37 = hl.get_ref | erence('GRCh37')
b37.add_liftover('gs://hail-common/references/grch37_to_grch38.over.chain.gz', 'GRCh38')
mt = mt.annotate_rows(interval=hl.liftover(mt.interval, 'GRCh38'))
mt.describe()
mt.write('gs://hail-datasets/hail-data/gtex_v7_transcript_tpm.GRCh38.liftover.mt', overwrite | =True)
|
tazmanrising/ppa | genCSV/FileParsers/Cadence/SignOffSum.py | Python | mit | 1,596 | 0.006266 | from FileParsers.Parser import Parser
class CadenceSignOffSum(Parser):
def __init__(self, file):
super(CadenceSignOffSum, self).__init__(file)
@staticmethod
def match_line(regex1, line):
import re
line_variables = '.*(%s)[^\|]*\|[^\|]*\|[\s]*([\d.]*).*' % regex1
result = re.search(line_variables, line, re.I)
return result
def search_file(self):
wns_metric_name = self.replace_space('apr REG2REG WNS')
apr_tns_metric_name = self.replace_space('apr REG2REG TNS')
max_cap_metric_name = self.replace_space('apr max cap viols')
max_trans_metric_name = self.replace_space('apr max trans viols')
for line in self.get_file_lines():
found_wns = self.match_line('WNS', line)
found_tns = self.match_line('TNS', line)
found_max_cap = self.match_line('max_cap', line)
found_max_trans = self.match_line('max_tran', line)
if self.add_to_metrics(found_wns, wns_metric_name):
pass
elif self.add_to_metrics(found_tns, apr_tns_metric_name):
self.metrics.append((apr_tns_metric_name, self.format_metric_values(found_tns.group(2))))
elif self.add_to_metrics(found_max_cap, max_cap_metric_name):
self.metrics.append((max_cap_metric_name, self.format_metric_values(found_max_cap.group(2))))
elif self.add_to_metrics(found_max_trans, max_trans_metric_name):
| self.metrics.append((max_trans_m | etric_name, self.format_metric_values(found_max_trans.group(2)))) |
coin-or/GrUMPy | src/grumpy/examples/Dippy.py | Python | epl-1.0 | 630 | 0.01746 | from __future__ import print_function
DEBUGGING = False
import sys
#sys.path.append("C:\\CO | IN\\GIMPy\\GrUMPy\\trunk")
#sys.path.append("C:\\COIN\\GIMPy\\trunk")
from pulp import *
from coinor.dippy import DipProblem, Solve
prob = DipProb | lem("Dippy Example", display_mode = 'matplotlib',
display_interval = 1)
x1 = LpVariable("x_1", 0, None, LpInteger)
x2 = LpVariable("x_2", 0, cat=LpInteger)
prob += -x1 - x2, "min"
prob += -2 * x1 + 2 * x2 >= 1
prob += -8 * x1 + 10 * x2 <= 13
Solve(prob, {
'CutCGL': 0,
})
for var in prob.variables():
print(var.name, "=", var.value())
prob.Tree.display()
|
MarauderXtreme/sipa | sipa/model/pycroft/__init__.py | Python | mit | 3,323 | 0 | # -*- coding: utf-8 -*-
from ipaddress import IPv4Network
from sipa.backends import DataSource, Dormitory
from sipa.backends.exceptions import InvalidConfiguration
from . import user, api, userdb
def init_pycroft_api(app):
try:
app.extensions['pycroft_api'] = api.PycroftApi(
endpoint=app.config['PYCROFT_ENDPOINT'],
api_key=app.config['PYCROFT_API_KEY'],
)
except KeyError as exception:
raise Inva | lidConfiguration(*exception.args)
def init_userdb(app):
userdb.register_userdb_extension(app)
def init_context(app):
init_pycroft_api(app)
init_userdb(app)
datasource = DataSource(
name='pycroft',
user_class=user.User,
mail_server="agdsn.me",
support_mail="support@agdsn.de",
webmailer_url="https://mail.agdsn.de",
init_context=init_context
)
[ # pylint: disable=expression-not-assigned
Dormitory(n | ame=dorm[0], display_name=dorm[1], datasource=datasource,
subnets=dorm[2])
for dorm in [
('wu', "Wundtstraße", [
IPv4Network('141.30.216.0/24'), # Wu11
IPv4Network('141.30.222.0/24'), # Wu1
IPv4Network('141.30.223.0/24'), # Wu3
IPv4Network('141.30.228.0/24'), # Wu5
IPv4Network('141.30.224.0/24'), # Wu7
IPv4Network('141.30.202.0/24'), # Wu9
]),
('zw', "Zellescher Weg", [
IPv4Network('141.30.226.0/23'), # ZW41*
]),
('borsi', "Borsbergstraße", [
IPv4Network('141.76.121.0/24'), # Borsi34
]),
('zeu', "Zeunerstraße", [
IPv4Network('141.30.234.128/26'), # Zeu1f
IPv4Network('141.30.234.192/27'), # Zeu1f
]),
('buda', "Budapester Straße", [
IPv4Network('141.30.204.0/24'), # Bu22
IPv4Network('141.30.205.0/24'), # Bu24
]),
('fl', "Fritz-Löffler-Straße", [
IPv4Network('141.30.28.0/24'), # FL16
]),
('gps', "Gret-Palucca-Straße", [
IPv4Network('141.30.207.0/24'), # GPS11
]),
('gerok', "Gerokstraße", [
IPv4Network('141.76.124.0/24'), # Ger
]),
('neu', "Neuberinstraße", [
IPv4Network('141.30.203.0/26'), # Neu15
]),
('rei', "Reichenbachstraße", [
IPv4Network('141.30.211.0/24'), # Rei35
]),
('gu', "Gutzkowstraße", [
IPv4Network('141.30.212.0/24'), # Gu29a
IPv4Network('141.30.213.0/24'), # Gu29b
]),
('hoy', "Hoyerswerdaer Straße", [
IPv4Network('141.76.119.0/25'), # Hoy10
]),
('mar', "Marschnerstraße", [
IPv4Network('141.30.221.0/24'), # Mar31
]),
('gue', "Güntzstraße", [
IPv4Network('141.30.225.0/24'), # Gue29
]),
('hss', 'Hochschulstraße', [
IPv4Network('141.30.217.0/24'),
IPv4Network('141.30.234.0/25'),
IPv4Network('141.30.218.0/24'),
IPv4Network('141.30.215.128/25'),
IPv4Network('141.30.219.0/24'),
IPv4Network('141.30.234.224/27'),
]),
('bla', "Blasewitzer Straße", [
IPv4Network('141.30.29.0/24'), # Bla84
]),
]
]
__all__ = ['datasource']
|
lino-framework/book | docs/admin/mypy/prj1/settings.py | Python | bsd-2-clause | 724 | 0.008287 | # -*- coding: UTF-8 -*-
from lino.projects.std.settings import *
import logging
logging.getLogger('weasyprint').setLevel("ERROR") # see #1462
class Site(Site):
title = "Lino@prj1"
# server_url = "https://prj1.mydomain.com"
SITE = Site(globals())
# locally override attributes of | individual plugins
# SITE.plugins.finan.suggest_future_vouchers = True
# MySQL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite', #database name
'USER': 'django',
'PASSWORD': 'my cool password',
'HOST': 'localhost', |
'PORT': 3306,
'OPTIONS': {
"init_command": "SET storage_engine=MyISAM",
}
}
}
|
EricZaporzan/evention | evention/events/migrations/0010_ignoredevent.py | Python | mit | 1,007 | 0.002979 | # -*- coding: utf-8 -*-
# Generated by Djan | go 1.9.4 on 2016-04-11 11:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import | django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('events', '0009_event_title'),
]
operations = [
migrations.CreateModel(
name='IgnoredEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ignored', models.BooleanField(default=True)),
('since', models.DateTimeField(auto_now=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Event')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
dustlab/noisemapper | scripts/nmcollector.py | Python | mit | 259 | 0.015444 | #!/usr/bin/python
| from noisemapper.mapper import *
#from collectors.lib import utils
### Define the object mapper and start mapping
def main():
# utils.drop_privileges()
mapper = NoiseMapper( | )
mapper.run()
if __name__ == "__main__":
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.