code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-27 19:05
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the project's custom ``User`` model (Django 1.11.5).

    Mirrors the columns of ``django.contrib.auth``'s ``AbstractUser`` and
    adds the project-specific ``email``/``first_name``/``last_name``/``phone``
    fields.  Auto-generated; edit with care.
    """

    # First migration of this app.
    initial = True

    # The auth migration is required so the Group/Permission M2M targets exist.
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Project-specific additions below.
                ('email', models.EmailField(max_length=254, verbose_name='Email')),
                ('first_name', models.CharField(max_length=50, verbose_name='First Name')),
                ('last_name', models.CharField(max_length=50, verbose_name='Last Name')),
                ('phone', models.CharField(max_length=20, verbose_name='Phone')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                # Reuse the stock UserManager (create_user/create_superuser).
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| we-inc/mms-snow-white-and-the-seven-pandas | webserver/apps/users/migrations/0001_initial.py | Python | mit | 2,969 |
import requests
from bs4 import BeautifulSoup
import urllib2 # require python 2.0
"""
1. Get all subreddit name from redditlist.com
using urllib and BeautifulSoup library
"""
def get_subreddit_list(max_pages):
    """
    Collect subreddit names from http://www.redditlist.com.

    Crawls ``max_pages`` listing pages, extracts every safe-for-work
    ("sfw") link, deduplicates them, and appends the bare subreddit names
    (the part after "http://reddit.com/r/") to
    ./Resources/subreddit_list.txt.
    """
    subs = []
    print("Getting subreddits...")
    for page in range(1, max_pages + 1):
        print("Crawling Page " + str(page))
        if page == 1:
            url = "http://www.redditlist.com"
        else:
            url = "http://www.redditlist.com?page=" + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        # Only the SFW anchors point at subreddits.
        for link in soup.findAll("a", {"class": "sfw"}):
            subs.append(link.get("href"))
    subreddit_url = "http://reddit.com/r/"
    subreddits_count = 0
    # Deduplicate, then open the output file once instead of once per line
    # (the original re-opened it for every subreddit written).
    with open("./Resources/subreddit_list.txt", "a") as myfile:
        for subreddit in set(subs):
            if subreddit_url in subreddit:
                # len("http://reddit.com/r/") == 20 -> keep only the name.
                # print() works under both Python 2 and 3 for one argument;
                # the original used a py2-only print statement.
                print(subreddit[20:])
                myfile.write("{} \n".format(subreddit[20:]))
                subreddits_count += 1
    print("Collect " + str(subreddits_count) + " subreddits")
# Crawl all 33 pages of http://www.redditlist.com when run as a script.
get_subreddit_list(33)
#!/usr/bin/env python
"""stack.py: Stack implementation"""
__author__ = 'Rohit Sinha'
class Stack:
    """A simple LIFO stack backed by a Python list."""

    def __init__(self):
        # The top of the stack is the end of this list.
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return len(self.items) == 0

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the item on top of the stack."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def __str__(self):
        # Render like a list, but without the surrounding brackets.
        return str(self.items).strip('[]')
if __name__ == '__main__':
    # Command-line smoke test for the Stack implementation.
    s = Stack()
    print(s.isEmpty())   # True
    s.push(5)
    s.push('Hello')
    print(s.peek())      # Hello
    s.push(True)
    print(s.peek())      # True
    print(s.size())      # 3
    print(s.pop())       # True
    print(s)             # 5, 'Hello'
| rohitsinha54/Learning-Python | algorithms/stack.py | Python | mit | 716 |
# -*- coding: utf-8 -*-
"""
@author: mthh
"""
import matplotlib
import numpy as np
from geopandas import GeoDataFrame, pd
from shapely.geometry import MultiPolygon, Polygon, Point
from . import RequestConfig, Point as _Point
from .core import table
if not matplotlib.get_backend():
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
def contour_poly(gdf, field_name, n_class):
    """
    Interpolate the time values (stored in the column `field_name`)
    from the points contained in `gdf` and compute the contour polygons
    in `n_class`.

    Parameters
    ----------
    gdf : :py:obj:`geopandas.GeoDataFrame`
        The GeoDataFrame containing points and associated values.
    field_name : str
        The name of the column of *gdf* containing the value to use.
    n_class : int
        The number of class to use for contour polygons if levels is an
        integer (exemple: levels=8).

    Returns
    -------
    collection_polygons : :py:obj:matplotlib.contour.QuadContourSet
        The shape of the computed polygons.
    levels : list of ints/floats
        The levels actually used when making the contours, excluding
        the minimum (should be a list of `n_class` values).
    """
    # Don't take points without value:
    gdf = gdf.iloc[gdf[field_name].to_numpy().nonzero()[0]][:]
    # Try to avoid invalid geoms:
    # NOTE(review): ``GeoSeries.valid()`` is not part of current geopandas
    # (the ``is_valid`` property is) — confirm the geopandas version this
    # code targets before relying on this branch.
    if len(gdf.geometry.valid()) != len(gdf):
        # Invalid geoms have been encountered:
        valid_geoms = gdf.geometry.valid()
        valid_geoms = valid_geoms.reset_index()
        valid_geoms['idx'] = valid_geoms['index']
        del valid_geoms['index']
        valid_geoms[field_name] = \
            valid_geoms.idx.apply(lambda x: gdf[field_name][x])
    else:
        valid_geoms = gdf[['geometry', field_name]][:]
    # Extract coordinates and values while skipping anything unreadable,
    # to avoid feeding invalid values to griddata:
    try:  # Normal way (fails if a non valid geom is encountered)
        x = np.array([geom.coords.xy[0][0] for geom in valid_geoms.geometry])
        y = np.array([geom.coords.xy[1][0] for geom in valid_geoms.geometry])
        z = valid_geoms[field_name].values
    except Exception:  # Taking the long way to load the values...
        x = np.array([])
        y = np.array([])
        z = np.array([], dtype=float)
        for idx, geom, val in gdf[['geometry', field_name]].itertuples():
            try:
                x = np.append(x, geom.coords.xy[0][0])
                y = np.append(y, geom.coords.xy[1][0])
                z = np.append(z, val)
            except Exception as err:
                print(err)
    # Compute the bounding box of the points:
    minx = np.nanmin(x)
    miny = np.nanmin(y)
    maxx = np.nanmax(x)
    maxy = np.nanmax(y)
    # Assuming we want a square grid for the interpolation:
    xi = np.linspace(minx, maxx, 200)
    yi = np.linspace(miny, maxy, 200)
    # BUGFIX: scipy.interpolate.griddata has the signature
    # (points, values, xi, method=...) — the previous call used the old
    # matplotlib.mlab.griddata signature (x, y, z, xi, yi, interp=...)
    # and raised a TypeError with the scipy import used here.
    xi_grid, yi_grid = np.meshgrid(xi, yi)
    zi = griddata((x, y), z, (xi_grid, yi_grid), method='linear')
    interval_time = int(round(np.nanmax(z) / n_class))
    nb_inter = n_class + 1
    # Class boundaries: 0, interval_time, 2*interval_time, ... capped at
    # nb_inter + 1 values.
    levels = tuple([nb for nb in range(0, int(
        np.nanmax(z) + 1) + interval_time, interval_time)][:nb_inter + 1])
    collec_poly = plt.contourf(
        xi, yi, zi, levels, cmap=plt.cm.rainbow,
        vmax=abs(zi).max(), vmin=-abs(zi).max(), alpha=0.35
    )
    # Drop the minimum level: callers pair the remaining levels with the
    # polygon collections.
    return collec_poly, levels[1:]
def isopoly_to_gdf(collec_poly, field_name, levels):
    """
    Transform a collection of matplotlib polygons (:py:obj:`QuadContourSet`)
    to a :py:obj:`GeoDataFrame` with a columns (`field_name`) filled by the
    values contained in `levels`.

    Parameters
    ----------
    collec_poly : :py:obj:matplotlib.contour.QuadContourSet
        The previously retrieved collections of contour polygons.
    field_name : str
        The name of the column to create which will contain values from `levels`.
    levels : list of ints/floats
        The values to be used when creating the `GeoDataFrame` of polygons,
        likely the values corresponding to the bins values
        used to create the polygons in the contourf function.

    Returns
    -------
    gdf_polygons : :py:obj:`GeoDataFrame`
        The contour polygons as a GeoDataFrame, with a column filled
        with the corresponding levels.
    """
    polygons, data = [], []
    # One PathCollection per contour level; each may contain several paths.
    for i, polygon in enumerate(collec_poly.collections):
        mpoly = []
        for path in polygon.get_paths():
            # Keep the raw vertices; simplification could drop small rings.
            path.should_simplify = False
            poly = path.to_polygons()
            # First ring is the exterior; subsequent rings are holes.
            exterior, holes = [], []
            if len(poly) > 0 and len(poly[0]) > 3:
                exterior = poly[0]
                if len(poly) > 1:  # There's some holes
                    # Rings with fewer than 4 points cannot form a polygon.
                    holes = [h for h in poly[1:] if len(h) > 3]
            mpoly.append(Polygon(exterior, holes))
        if len(mpoly) > 1:
            mpoly = MultiPolygon(mpoly)
            polygons.append(mpoly)
            if levels:
                data.append(levels[i])
        elif len(mpoly) == 1:
            polygons.append(mpoly[0])
            if levels:
                data.append(levels[i])
    # Only attach the level column when every polygon got a value;
    # otherwise return the bare geometry.
    if len(data) == len(polygons):
        return GeoDataFrame(geometry=polygons,
                            data=data,
                            columns=[field_name])
    else:
        return GeoDataFrame(geometry=polygons)
def make_grid(gdf, nb_points):
    """
    Build a regular grid of cell-center points covering the total bounds
    of *gdf*.

    Parameters
    ----------
    gdf : GeoDataFrame
        The collection of geometries whose bounds the grid must cover.
    nb_points : int
        The approximate number of expected points; the grid is square,
        with ``int(sqrt(nb_points))`` rows and columns.

    Returns
    -------
    grid : GeoDataFrame
        A collection of points (one per grid-cell center) in the CRS
        of *gdf*.
    """
    xmin, ymin, xmax, ymax = gdf.total_bounds
    side = int(nb_points ** 0.5)
    rows, cols = side, side
    height = (ymax - ymin) / rows
    width = (xmax - xmin) / cols
    centers = []
    # Walk column by column, top to bottom, recording each cell's center.
    x_left = xmin
    x_right = xmin + width
    for _ in range(cols):
        y_top = ymax
        y_bottom = ymax - height
        for _ in range(rows):
            centers.append(
                ((x_left + x_right) / 2, (y_top + y_bottom) / 2))
            y_top -= height
            y_bottom -= height
        x_left += width
        x_right += width
    return GeoDataFrame(
        geometry=pd.Series(centers).apply(lambda coords: Point(coords)),
        crs=gdf.crs
    )
class AccessIsochrone:
    """
    Object allowing to query an OSRM instance for a matrix of distance within
    a defined radius, store the distance (to avoid making the same query again
    when not needed), interpolate time values on a grid and render the contour
    polygons.

    Parameters
    ----------
    point_origin : 2-floats tuple
        The coordinates of the center point to use as (x, y).
    points_grid : int
        The number of points of the underlying grid to use.
    size : float
        Search radius (in degree).
    url_config : osrm.RequestConfig
        The OSRM url to be requested.

    Attributes
    ----------
    center_point : collections.namedtuple
        The coordinates of the point used a center (potentially moved from the
        original point in order to be on the network).
    grid : geopandas.GeoDataFrame
        The point locations retrieved from OSRM (ie. potentially moved
        to be on the routable network).
    times : numpy.ndarray
        The time-distance table retrieved from OSRM.

    Methods
    -------
    render_contour(nb_class)
        Render the contour polygon according to the choosen number of class.
    """
    def __init__(self, point_origin, points_grid=250,
                 size=0.4, url_config=RequestConfig):
        # Build a square grid of candidate destinations within `size`
        # degrees of the origin.
        gdf = GeoDataFrame(geometry=[Point(point_origin).buffer(size)])
        grid = make_grid(gdf, points_grid)
        coords_grid = \
            [(i.coords.xy[0][0], i.coords.xy[1][0]) for i in grid.geometry]
        # One-to-many time table; OSRM may snap points onto the network.
        self.times, new_pt_origin, pts_dest = \
            table([point_origin], coords_grid, url_config=url_config)
        self.times = (self.times[0] / 60.0).round(2)  # Round values in minutes
        # Keep only destinations OSRM could actually reach (non-zero time).
        geoms, values = [], []
        for time, coord in zip(self.times, pts_dest):
            if time:
                geoms.append(Point(coord))
                values.append(time)
        self.grid = GeoDataFrame(geometry=geoms, data=values, columns=['time'])
        self.center_point = _Point(
            latitude=new_pt_origin[0][0], longitude=new_pt_origin[0][1])

    def render_contour(self, n_class):
        """
        Interpolate the stored times and build accessibility polygons.

        Parameters
        ----------
        n_class : int
            The desired number of class.

        Returns
        -------
        gdf_poly : GeoDataFrame
            The shape of the computed accessibility polygons.
        """
        collec_poly, levels = contour_poly(self.grid, 'time', n_class=n_class)
        gdf_poly = isopoly_to_gdf(collec_poly, 'time', levels)
        return gdf_poly
| ustroetz/python-osrm | osrm/extra.py | Python | mit | 9,281 |
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'visualization'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'network'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'openstack_dashboard.dashboards.project.visualization.panel.Visualization'
| garyphone/com_nets | visualization/enabled/_10010_project_visualization_panel.py | Python | mit | 401 |
import sys
import math
import CSVReader
import DecisionTree
# GLOBALS
attributes = list()
data = list(list())
pre_prune_tree = True
# MATH FUNCTIONS
def Entropy( yesNo ):
    """Shannon entropy (in bits) of a binary (yes, no) count pair."""
    yes, no = yesNo
    # A pure split (all one class) carries no information.
    if yes == 0 or no == 0:
        return 0
    total = yes + no
    p_yes = yes / total
    p_no = no / total
    return -p_yes * math.log(p_yes, 2) - p_no * math.log(p_no, 2)
def Gain( S, Attr ):
    """Information gain of splitting data set *S* on attribute *Attr*.

    Gain(S, A) = Entropy(S) - sum_v (|S_v| / |S|) * Entropy(S_v),
    where S_v is the subset of S whose value for A is v.
    """
    entropy_S = Entropy( resultsOfSet( S ) )
    entropy_sum = 0
    # Weighted entropy of each partition induced by the attribute's labels.
    for label in AttributeLabels( S, Attr ):
        subset_S = setWithLabel( S, Attr, label )
        entropy_sum += ( ( len( subset_S ) / len( S ) ) * Entropy( resultsOfSet( subset_S ) ) )
    return entropy_S - entropy_sum
# HELPER
def indexOfAttribute( Attr ):
    """Return the column index of *Attr* in the global ``attributes`` list."""
    return attributes.index( Attr )
def AttributeLabels( S, Attr ):
    """Return the distinct values (labels) that *Attr* takes within data set *S*."""
    index = indexOfAttribute( Attr )
    return list( set( [ row[ index ] for row in S ] ) )
def setWithLabel( S, Attr, Label ):
    """Return the rows of *S* whose value for *Attr* equals *Label*."""
    return list( filter( lambda row: row[ indexOfAttribute( Attr ) ] == Label, S ) )
def resultsOfSet( S ):
    """Return (#positive, #negative) outcomes of *S*, read from each row's last column."""
    # Count rows whose decision column is exactly the boolean False.
    negatives = sum(1 for row in S if row[-1] is False)
    return (len(S) - negatives, negatives)
def convertRowToDict( row ):
    """Map each attribute name (global ``attributes``) to the row's value at that column."""
    return { attributes[ i ] : row[ i ] for i in range( len( row ) ) }
def extractDecisions( S ):
    """Return the decision value (last column) of every row in *S*."""
    return list(map(lambda row: row[-1], S))
def compareDecisions( D1, D2 ):
    """Fraction of positions (up to the shorter length) where *D1* and *D2* hold the identical object."""
    overlap = min(len(D1), len(D2))
    matches = 0
    for i in range(overlap):
        # `is` works here because the decisions are the bool singletons.
        if D1[i] is D2[i]:
            matches += 1
    return matches / overlap
def findBestAttribute( S, attrs ):
    """Return the attribute in *attrs* with the highest information gain on *S*.

    With the global ``pre_prune_tree`` enabled, the gain must be strictly
    positive, otherwise None is returned (the -1 sentinel lets any gain win
    when pruning is off).
    """
    bestAttributeAndGain = ( None, -1 ) if not pre_prune_tree else ( None, 0 )
    #print( "+-- Gain ---" )
    for attr in attrs:
        attrGain = Gain( S, attr )
        #print( "|", attr, "%0.7f" % ( attrGain ) )
        if attrGain > bestAttributeAndGain[ 1 ]:
            bestAttributeAndGain = ( attr, attrGain )
    #print( "+-------------" )
    #print( " > Best attribute:", bestAttributeAndGain[0], "\n" )
    return bestAttributeAndGain[ 0 ]
# Prediction is by higher percentage
def getPrediction( S ):
    """Majority-vote prediction for *S*: True only when strictly more positive than negative rows (ties predict False)."""
    yes, no = resultsOfSet( S )
    # IDIOM: the comparison already yields the bool; the previous
    # `True if ... else False` was redundant. Strict ">" keeps tie
    # behavior identical.
    return yes > no
def createNextNodes( parent ):
    """Recursively grow the decision tree below *parent*.

    For each outcome (True/False) of the parent's attribute, take the
    matching subset of the parent's data, pick the best remaining attribute
    for it, attach a child node, and recurse. Stops when no attributes
    remain or no attribute yields a usable gain (findBestAttribute -> None).
    """
    if len( parent.attributes ) == 0: # No remaining attributes
        return
    trueParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, True )
    trueBestAttribute = findBestAttribute( trueParentDataSubset, parent.attributes )
    if trueBestAttribute is not None:
        parent.newTruePath( trueBestAttribute, trueParentDataSubset )
        createNextNodes( parent.truePath )
    falseParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, False )
    falseBestAttribute = findBestAttribute( falseParentDataSubset, parent.attributes )
    if falseBestAttribute is not None:
        parent.newFalsePath( falseBestAttribute, falseParentDataSubset )
        createNextNodes( parent.falsePath )
# ID3
def createDecisionTree( attrs, rows ):
    """Build an ID3 decision tree for *rows*.

    *attrs* is the full header list; its last entry is the outcome column
    and is excluded from the candidate split attributes. Returns a
    DecisionTree whose root is a leaf when all outcomes agree.
    """
    tree = DecisionTree.DecisionTree( attrs )
    rootAttributes = attrs[:-1]  # last column is the outcome, not a feature
    bestAttribute = findBestAttribute( rows, rootAttributes )
    outcomes = [ row[-1] for row in rows ]
    # If every outcome is identical there is nothing to split on.
    allSame = True
    for outcome in outcomes:
        if outcome != outcomes[0]:
            allSame = False
            break  # FIX: was `continue` — no need to scan remaining rows
    if allSame:
        tree.newRoot( None, rootAttributes, rows )
        return tree
    tree.newRoot( bestAttribute, rootAttributes, rows )
    createNextNodes( tree.root )  # Recursively builds tree
    return tree
# MAIN
def main( argv ):
    """Entry point: train an ID3 tree on argv[0], evaluate on argv[1], write the model to argv[2]."""
    if len(argv) != 3:
        return print( "ERROR: Usage \"python3 id3.py <training-set> <test-set> <model-file>\"" )
    # Load training data and publish it through the module globals used by
    # the helper functions above.
    training_tup = CSVReader.readBooleanCSV( argv[ 0 ] )
    global attributes; attributes = training_tup[ 0 ]
    global data ; data = training_tup[ 1 ]
    testing_tup = CSVReader.readBooleanCSV( argv[ 1 ] )
    test_attributes = testing_tup[ 0 ]
    test_data = testing_tup[ 1 ]
    test_decisions = extractDecisions( test_data )
    print( "Attributes" )
    print( ', '.join( attributes ), "\n" )
    tree = createDecisionTree( attributes, data )
    # Classify each test row by walking the tree, then score against the
    # known decisions.
    predictions = [ getPrediction( tree.dataSetFromDecisions( convertRowToDict( row ) ) ) for row in test_data ]
    print( "\nPrediction accuracy vs. testing data:", "{}%\n\n".format( 100 * compareDecisions( predictions, test_decisions ) ) )
    tree.printTree( argv[2] )
if __name__=='__main__':
main( sys.argv[1:] )
| CKPalk/MachineLearning | Assignment1/id3.py | Python | mit | 4,116 |
#!/usr/bin/python
import sys
sys.path.append("../../")
#import pyRay as ra
import pyRay.scene as scn
# TODO : how to pass arguments from function header?
# Each scene entry is (operation, sdf-name, format-string, args); the
# optional second tuple on obj2/obj3 lists parameter type codes.
object1 = ("obj1",(), [( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),( "S","sdSphere","%s",(1.2,) )])
object2 = ("obj1",("f","f3"),[( "U","sdBox" ,"%s",("2",) ),( "S","sdSphere","%s",("1",) )])
object3 = ("obj2",("f","f2"),[( "U","sdBox" ,"%s",(("2",1.0),) ),( "S","sdSphere","%s",("1",) )])
# Union of a unit box with a subtracted sphere of radius 1.2.
scene = [
( "U","sdBox" ,"%s",((1.0,1.0,1.0),) ),
( "S","sdSphere","%s",(1.2,) ),
]
# Generate and show the shader source for the scene (Python 2 script).
scene_src = scn.parseSceneList(scene)
print scene_src
| ProkopHapala/SimpleSimulationEngine | python/pyRay/tests/testSceneList.py | Python | mit | 624 |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os
import sys
import json
import re
import uuid
import base64
import models
from models.movie_trailer_model import Base, MovieTrailerModel
# Delete all stored movie trailers and report how many rows were removed.
rows = models.delete_movie_trailers()
# BUGFIX: use %-formatting. The old call print("%d ...", rows) passed two
# arguments to print() and emitted the literal format string followed by
# the count, instead of substituting it.
print("%d rows were deleted from table" % rows)
| DuCalixte/last_week_rotten_trailer | app/database_cleanup.py | Python | mit | 311 |
"""962. Maximum Width Ramp
https://leetcode.com/problems/maximum-width-ramp/
Given an array A of integers, a ramp is a tuple (i, j) for which
i < j and A[i] <= A[j]. The width of such a ramp is j - i.
Find the maximum width of a ramp in A. If one doesn't exist, return 0.
Example 1:
Input: [6,0,8,2,1,5]
Output: 4
Explanation:
The maximum width ramp is achieved at (i, j) = (1, 5): A[1] = 0 and A[5] = 5.
Example 2:
Input: [9,8,1,0,1,9,4,0,4,1]
Output: 7
Explanation:
The maximum width ramp is achieved at (i, j) = (2, 9): A[2] = 1 and A[9] = 1.
Note:
2 <= A.length <= 50000
0 <= A[i] <= 50000
"""
import bisect
from typing import List
class Solution:
    def max_width_ramp(self, a: List[int]) -> int:
        """Return the widest j - i with i < j and a[i] <= a[j], or 0 if none exists."""
        best = 0
        n = len(a)
        # Candidates scanned right-to-left: indices decrease while values
        # increase, so for each i a bisect finds the right-most usable j.
        stack = [(a[n - 1], n - 1)]
        for i in reversed(range(n - 1)):
            pos = bisect.bisect(stack, (a[i],))
            if pos < len(stack):
                # a[stack[pos][1]] >= a[i]: a valid ramp ending there.
                best = max(best, stack[pos][1] - i)
            else:
                # a[i] exceeds every candidate value; record it as a new
                # potential left end for later (smaller) i.
                stack.append((a[i], i))
        return best
| isudox/leetcode-solution | python-algorithm/leetcode/problem_962.py | Python | mit | 1,140 |
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
# DJANGO GUARDIAN
ANONYMOUS_USER_ID = -1 | michailbrynard/django-skeleton | src/config/settings/extensions/guardian.py | Python | mit | 180 |
# must use python 2
from classy import Class
import matplotlib.pyplot as plt
import numpy as np
import math
max_l = 5000
max_scalars = '5000'
ell = np.array( range(1, max_l+1) )
def getDl( pii1=0.5e-10, pii2=1e-9, pri1=1e-13 ):
    """Run CLASS and return the (TT, TE, EE) D_l spectra up to ``max_l``.

    pii1/pii2 are the CDI isocurvature power amplitudes at k1/k2, and
    pri1 the adiabatic/isocurvature correlation amplitude at k1
    (two-scales parameterisation of the primordial spectrum).
    """
    # Define your cosmology (what is not specified will be set to CLASS default parameters)
    params = {
        'output': 'tCl lCl pCl',
        'modes': 's', # scalar perturbations
        'lensing': 'yes',
        'ic': 'ad&cdi',
        'l_max_scalars':max_scalars,
        'P_k_ini type': 'two_scales',
        'k1': 0.002,
        'k2': 0.1,
        'P_{RR}^1': 2.34e-9,
        'P_{RR}^2': 2.115e-9,
        'P_{II}^1' : pii1,
        'P_{II}^2' : pii2,
        'P_{RI}^1' : pri1}
    cosmo = Class()
    cosmo.set(params)
    cosmo.compute()
    # print(dir(cosmo)) # use this command to see what is in the cosmo
    # It is a dictionary that contains the fields: tt, te, ee, bb, pp, tp
    cls = cosmo.raw_cl(max_l) # Access the cl until l=1000
    # Drop the l=0 monopole so arrays align with the module-level `ell`.
    yy = np.array( cls['ee'][1:] )
    zz = np.array( cls['tt'][1:] )
    yz = np.array( cls['te'][1:] )
    # Convert C_l to the conventional D_l = l(l+1) C_l / (2 pi).
    ee = ((ell)*(ell+1) * yy / (2 * math.pi))
    tt = ((ell)*(ell+1) * zz / (2 * math.pi))
    te = ((ell)*(ell+1) * yz / (2 * math.pi))
    # Free CLASS's internal allocations before the next call.
    cosmo.struct_cleanup()
    return tt, te, ee
# Print on screen to see the output
# print len(cls['tt'])
pii1 = 0.5e-10
pii2 = 1e-9
pri1 = 1e-13
dpii1 = pii1 / 10000.0
dpii2 = pii2 / 10000.0
dpri1 = pri1 / 10000.0
pii1_tt1, pii1_te1, pii1_ee1 = getDl( pii1 = pii1 - dpii1 )
pii1_tt2, pii1_te2, pii1_ee2 = getDl( pii1 = pii1 + dpii1 )
pii2_tt1, pii2_te1, pii2_ee1 = getDl( pii2 = pii2 - dpii2 )
pii2_tt2, pii2_te2, pii2_ee2 = getDl( pii2 = pii2 + dpii2 )
pri1_tt1, pri1_te1, pri1_ee1 = getDl( pri1 = pri1 - dpri1 )
pri1_tt2, pri1_te2, pri1_ee2 = getDl( pri1 = pri1 + dpri1 )
# plot something with matplotlib...
plt.plot( (pii1_tt2 - pii1_tt1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_tt2 - pii2_tt1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_tt2 - pri1_tt1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TT Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('tt.pdf')
plt.clf()
plt.plot( (pii1_te2 - pii1_te1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_te2 - pii2_te1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_te2 - pri1_te1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('te.pdf')
plt.clf()
plt.plot( (pii1_ee2 - pii1_ee1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_ee2 - pii2_ee1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_ee2 - pri1_ee1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('EE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.legend()
plt.savefig('ee.pdf')
plt.clf()
plt.plot( (pii1_ee2 - pii1_ee1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_ee2 - pii2_ee1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_ee2 - pri1_ee1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('EE Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.yscale('log')
plt.legend()
plt.savefig('logee.pdf')
plt.clf()
plt.plot( (pii1_tt2 - pii1_tt1)/(2 * dpii1), label='$P_{II}^1$', markersize=0 )
plt.plot( (pii2_tt2 - pii2_tt1)/(2 * dpii2), label='$P_{II}^2$', markersize=0 )
# plt.plot( (pri1_tt2 - pri1_tt1)/(2 * dpri1), label='$P_{RI}^1$', markersize=0 )
plt.title('TT Derivatives')
plt.ylabel(r'$d \mathcal{D}_l / d P_{II}^j$')
plt.xlabel(r'$l$')
plt.yscale('log')
plt.legend()
plt.savefig('logtt.pdf')
plt.clf()
| xzackli/isocurvature_2017 | analysis/plot_isocurvature_spectra_effects/deriv_iso.py | Python | mit | 3,825 |
#!/usr/bin/env python
# coding=utf-8
import pytest
import sacred.optional as opt
from sacred.config.config_scope import (
ConfigScope,
dedent_function_body,
dedent_line,
get_function_body,
is_empty_or_comment,
)
from sacred.config.custom_containers import DogmaticDict, DogmaticList
@pytest.fixture
def conf_scope():
    """Fixture: a ConfigScope exercising most supported value kinds."""
    # NOTE: ConfigScope introspects the *source* of ``cfg`` — its comments
    # become config docs and its assignments become entries — so the body
    # below is itself expected test data and must not be edited.
    @ConfigScope
    def cfg():
        # description for a
        a = 1
        # description for b and c
        b, c = 2.0, True
        # d and dd are both strings
        d = dd = "string"
        e = [1, 2, 3]  # inline description for e
        f = {"a": "b", "c": "d"}
        composit1 = a + b
        # pylint: this comment is filtered out
        composit2 = f["c"] + "ada"
        func1 = lambda: 23
        deriv = func1()
        def func2(a):
            return "Nothing to report" + a
        some_type = int
    cfg()
    return cfg
def test_result_of_config_scope_is_dict(conf_scope):
cfg = conf_scope()
assert isinstance(cfg, dict)
def test_result_of_config_scope_contains_keys(conf_scope):
cfg = conf_scope()
assert set(cfg.keys()) == {
"a",
"b",
"c",
"d",
"dd",
"e",
"f",
"composit1",
"composit2",
"deriv",
"func1",
"func2",
"some_type",
}
assert cfg["a"] == 1
assert cfg["b"] == 2.0
assert cfg["c"]
assert cfg["d"] == "string"
assert cfg["dd"] == "string"
assert cfg["e"] == [1, 2, 3]
assert cfg["f"] == {"a": "b", "c": "d"}
assert cfg["composit1"] == 3.0
assert cfg["composit2"] == "dada"
assert cfg["func1"]() == 23
assert cfg["func2"](", sir!") == "Nothing to report, sir!"
assert cfg["some_type"] == int
assert cfg["deriv"] == 23
def test_fixing_values(conf_scope):
cfg = conf_scope({"a": 100})
assert cfg["a"] == 100
assert cfg["composit1"] == 102.0
def test_fixing_nested_dicts(conf_scope):
cfg = conf_scope({"f": {"c": "t"}})
assert cfg["f"]["a"] == "b"
assert cfg["f"]["c"] == "t"
assert cfg["composit2"] == "tada"
def test_adding_values(conf_scope):
cfg = conf_scope({"g": 23, "h": {"i": 10}})
assert cfg["g"] == 23
assert cfg["h"] == {"i": 10}
assert cfg.added == {"g", "h", "h.i"}
def test_typechange(conf_scope):
cfg = conf_scope({"a": "bar", "b": "foo", "c": 1})
assert cfg.typechanged == {
"a": (int, type("bar")),
"b": (float, type("foo")),
"c": (bool, int),
}
def test_nested_typechange(conf_scope):
cfg = conf_scope({"f": {"a": 10}})
assert cfg.typechanged == {"f.a": (type("a"), int)}
def test_config_docs(conf_scope):
cfg = conf_scope()
assert cfg.docs == {
"a": "description for a",
"b": "description for b and c",
"c": "description for b and c",
"d": "d and dd are both strings",
"dd": "d and dd are both strings",
"e": "inline description for e",
"seed": "the random seed for this experiment",
}
def is_dogmatic(a):
    """Return True if *a* is, or (recursively) contains, a dogmatic container."""
    if isinstance(a, (DogmaticDict, DogmaticList)):
        return True
    elif isinstance(a, dict):
        return any(is_dogmatic(v) for v in a.values())
    elif isinstance(a, (list, tuple)):
        return any(is_dogmatic(v) for v in a)
    # FIX(idiom): previously fell off the end and returned None for
    # scalars; return an explicit bool (still falsy, so callers using
    # `assert not is_dogmatic(...)` behave identically).
    return False
def test_conf_scope_is_not_dogmatic(conf_scope):
assert not is_dogmatic(conf_scope({"e": [1, 1, 1]}))
@pytest.mark.skipif(not opt.has_numpy, reason="requires numpy")
def test_conf_scope_handles_numpy_bools():
@ConfigScope
def conf_scope():
a = opt.np.bool_(1)
cfg = conf_scope()
assert "a" in cfg
assert cfg["a"]
def test_conf_scope_can_access_preset():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(preset={"a": 21})
assert cfg["answer"] == 42
def test_conf_scope_contains_presets():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(preset={"a": 21, "unrelated": True})
assert set(cfg.keys()) == {"a", "answer", "unrelated"}
assert cfg["a"] == 21
assert cfg["answer"] == 42
assert cfg["unrelated"] is True
def test_conf_scope_cannot_access_undeclared_presets():
@ConfigScope
def conf_scope():
answer = 2 * a
with pytest.raises(NameError):
conf_scope(preset={"a": 21})
def test_conf_scope_can_access_fallback():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(fallback={"a": 21})
assert cfg["answer"] == 42
def test_conf_scope_does_not_contain_fallback():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(fallback={"a": 21, "b": 10})
assert set(cfg.keys()) == {"answer"}
def test_conf_scope_cannot_access_undeclared_fallback():
@ConfigScope
def conf_scope():
answer = 2 * a
with pytest.raises(NameError):
conf_scope(fallback={"a": 21})
def test_conf_scope_can_access_fallback_and_preset():
@ConfigScope
def conf_scope(a, b):
answer = a + b
cfg = conf_scope(preset={"b": 40}, fallback={"a": 2})
assert cfg["answer"] == 42
def test_conf_raises_for_unaccessible_arguments():
@ConfigScope
def conf_scope(a, b, c):
answer = 42
with pytest.raises(KeyError):
conf_scope(preset={"a": 1}, fallback={"b": 2})
def test_can_access_globals_from_original_scope():
from .enclosed_config_scope import cfg as conf_scope
cfg = conf_scope()
assert set(cfg.keys()) == {"answer"}
assert cfg["answer"] == 42
SEVEN = 7
def test_cannot_access_globals_from_calling_scope():
from .enclosed_config_scope import cfg2 as conf_scope
with pytest.raises(NameError):
conf_scope() # would require SEVEN
def test_fixed_subentry_of_preset():
@ConfigScope
def conf_scope():
pass
cfg = conf_scope(preset={"d": {"a": 1, "b": 2}}, fixed={"d": {"a": 10}})
assert set(cfg.keys()) == {"d"}
assert set(cfg["d"].keys()) == {"a", "b"}
assert cfg["d"]["a"] == 10
assert cfg["d"]["b"] == 2
# fmt: off
@pytest.mark.parametrize("line,indent,expected", [
(' a=5', ' ', 'a=5'),
(' a=5', ' ', 'a=5'),
('a=5', ' ', 'a=5'),
(' a=5', ' ', ' a=5'),
(' a=5', '', ' a=5'),
(' a=5', '\t', ' a=5'),
(' a=5', ' ', 'a=5'),
(' a=5', ' \t', ' a=5')
])
def test_dedent_line(line, indent, expected):
assert dedent_line(line, indent) == expected
@pytest.mark.parametrize("line,expected", [
('', True),
(' ', True),
('\n', True),
(' \n', True),
(' \t \n', True),
('#comment', True),
(' #comment', True),
(' a=5 # not comment', False),
('a=5', False),
('"""', False),
("'''", False)
])
def test_is_empty_or_comment(line, expected):
assert is_empty_or_comment(line) == expected
def evil_indentation_func(a,
b,
c, d): # test comment
# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
body = '''# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
'''
dedented_body = '''# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
'''
# fmt: on
def test_dedent_body():
assert dedent_function_body(body) == dedented_body
def test_get_function_body():
func_body, line_offset = get_function_body(evil_indentation_func)
assert func_body == body
def test_config_scope_can_deal_with_indentation_madness():
# assert_no_raise:
ConfigScope(evil_indentation_func)
| IDSIA/sacred | tests/test_config/test_config_scope.py | Python | mit | 8,409 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import User
admin.site.register(User)
| ba1dr/tplgenerator | templates/django/__APPNAME__/apps/user_auth/admin.py | Python | mit | 110 |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import cv2
from scipy.misc import imresize
from scipy.misc import imread
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import tensorflow as tf
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  # Subtract the dataset pixel means before resizing.
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  # One pyramid level per configured test scale.
  for target_size in cfg.TEST.SCALES:
    # Scale so the short side hits target_size...
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im_row,im_col,_ = im.shape
    # NOTE(review): scipy.misc.imresize converts its input to uint8 —
    # confirm the mean-subtracted float image survives this (the upstream
    # implementation used cv2.resize here).
    im = imresize(im_orig, (int(im_row*im_scale), int(im_col*im_scale)))
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors)
def _get_blobs(im):
  """Convert an image and RoIs within that image into network inputs."""
  data_blob, scale_factors = _get_image_blob(im)
  return {'data': data_blob}, scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
  """Run one forward pass on ``im`` and return (scores, pred_boxes)."""
  blobs, im_scales = _get_blobs(im)
  assert len(im_scales) == 1, "Only single-image batch implemented"

  im_blob = blobs['data']
  # im_info carries (height, width, scale) of the network input image.
  blobs['im_info'] = np.array(
      [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)

  _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])

  # Map RoIs back into original (unscaled) image coordinates.
  boxes = rois[:, 1:5] / im_scales[0]
  scores = np.reshape(scores, [scores.shape[0], -1])
  bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])

  if cfg.TEST.BBOX_REG:
    # Apply bounding-box regression deltas, then clip to the image.
    pred_boxes = bbox_transform_inv(boxes, bbox_pred)
    pred_boxes = _clip_boxes(pred_boxes, im.shape)
  else:
    # No regression: simply repeat the RoIs once per class.
    pred_boxes = np.tile(boxes, (1, scores.shape[1]))

  return scores, pred_boxes
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.

  Arguments:
    all_boxes: all_boxes[cls][image] is an N x 5 ndarray of detections
      (x1, y1, x2, y2, score), or an empty list when there are none.
    thresh: NMS overlap threshold.

  Returns:
    A parallel cls-by-image structure holding only the kept detections.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      # BUG FIX: ``dets == []`` is an element-wise comparison once ``dets``
      # is an ndarray and never takes the branch; test the length instead.
      if len(dets) == 0:
        continue

      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      # Drop degenerate boxes and low-scoring detections before NMS.
      inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
      dets = dets[inds, :]
      if len(dets) == 0:
        continue

      keep = nms(dets, thresh)
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes
def test_net(sess, net, imdb, weights_filename, experiment_setup=None,
             max_per_image=100, thresh=0.05):
  """Test a Fast R-CNN network on an image database.

  Arguments:
    sess: TensorFlow session with the trained network loaded.
    net: network object exposing ``test_image``.
    imdb: image database providing image paths and evaluation code.
    weights_filename: used to derive the detection output directory.
    experiment_setup: optional tag forwarded to ``evaluate_detections``.
    max_per_image: keep at most this many detections per image (0 = no cap).
    thresh: minimum per-class score for a detection to be kept.
  """
  # NOTE: the original "docstring" sat after this statement and was a dead
  # string expression; it has been promoted to a real docstring above.
  np.random.seed(cfg.RNG_SEED)
  num_images = len(imdb.image_index)
  # all_boxes[cls][image] = N x 5 array of detections (x1, y1, x2, y2, score)
  all_boxes = [[[] for _ in range(num_images)]
               for _ in range(imdb.num_classes)]

  output_dir = get_output_dir(imdb, weights_filename)
  print('using output_dir: ', output_dir)

  # timers
  _t = {'im_detect': Timer(), 'misc': Timer()}

  # Folders for optional activation / zero-fraction dumps.  The dump code
  # itself is currently disabled, but the directories are still created so
  # re-enabling it does not fail.
  test_actdir = '../activations_retrained'
  if not os.path.exists(test_actdir):
    os.mkdir(test_actdir)
  test_zerodir = './zero_fractions'
  if not os.path.exists(test_zerodir):
    os.mkdir(test_zerodir)

  for i in range(num_images):
    im = imread(imdb.image_path_at(i))

    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()

    _t['misc'].tic()
    # skip j = 0, because it's the background class
    for j in range(1, imdb.num_classes):
      inds = np.where(scores[:, j] > thresh)[0]
      cls_scores = scores[inds, j]
      cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
      cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
        .astype(np.float32, copy=False)
      keep = nms(cls_dets, cfg.TEST.NMS)
      cls_dets = cls_dets[keep, :]
      all_boxes[j][i] = cls_dets

    # Limit to max_per_image detections *over all classes* by raising the
    # effective score threshold to the max_per_image-th best score.
    if max_per_image > 0:
      image_scores = np.hstack([all_boxes[j][i][:, -1]
                                for j in range(1, imdb.num_classes)])
      if len(image_scores) > max_per_image:
        image_thresh = np.sort(image_scores)[-max_per_image]
        for j in range(1, imdb.num_classes):
          keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
          all_boxes[j][i] = all_boxes[j][i][keep, :]
    _t['misc'].toc()

    if i % 1000 == 0:
      print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
            .format(i + 1, num_images, _t['im_detect'].average_time,
                    _t['misc'].average_time))

  det_file = os.path.join(output_dir, 'detections.pkl')
  with open(det_file, 'wb') as f:
    pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

  print('Evaluating detections')
  imdb.evaluate_detections(all_boxes, output_dir, experiment_setup)
| shuang1330/tf-faster-rcnn | lib/model/test.py | Python | mit | 8,856 |
__author__ = 'minhtule'
from request import *
class Tracker(object):
    """Sends Google Analytics hits (pageviews, transactions, items) on
    behalf of a single visitor, identified by a tracking id."""

    def __init__(self, tracking_id, visitor):
        self._tracking_id = tracking_id
        self._visitor = visitor
        self._debug_enabled = False

    # --- read-only identity -------------------------------------------

    @property
    def tracking_id(self):
        """The GA property id this tracker reports to."""
        return self._tracking_id

    @property
    def visitor(self):
        """The visitor object all hits are attributed to."""
        return self._visitor

    @property
    def client_id(self):
        """GA client id, taken from the visitor."""
        return self._visitor.id

    # --- debug flag ----------------------------------------------------

    @property
    def debug_enabled(self):
        return self._debug_enabled

    @debug_enabled.setter
    def debug_enabled(self, value):
        self._debug_enabled = value

    # --- request attributes forwarded from the visitor -----------------

    @property
    def original_request_ip(self):
        return self.visitor.ip_address

    @property
    def original_request_user_agent(self):
        return self.visitor.user_agent

    @property
    def original_request_language(self):
        return self.visitor.language

    # --- hit senders ---------------------------------------------------

    def send_page(self, hostname=None, path=None, title=None):
        """Track a pageview; host/path default to the visitor's document."""
        doc_host = hostname if hostname else self.visitor.document_host
        doc_path = path if path else self.visitor.document_path
        request = PageTrackingRequest(
            self,
            document_hostname=doc_host,
            document_path=doc_path,
            document_title=title
        )
        request.send()

    def send_transaction(self, transaction_id, transaction_affiliation=None, transaction_revenue=None, transaction_shipping=None, transaction_tax=None, currency_code=None):
        """Track an e-commerce transaction."""
        request = TransactionTrackingRequest(
            self,
            transaction_id,
            transaction_affiliation=transaction_affiliation,
            transaction_revenue=transaction_revenue,
            transaction_shipping=transaction_shipping,
            transaction_tax=transaction_tax,
            currency_code=currency_code
        )
        request.send()

    def send_item(self, transaction_id, item_name, item_price=None, item_quantity=None, item_code=None, item_category=None, currency_code=None):
        """Track a single item belonging to a transaction."""
        request = ItemTrackingRequest(
            self,
            transaction_id,
            item_name,
            item_price=item_price,
            item_quantity=item_quantity,
            item_code=item_code,
            item_category=item_category,
            currency_code=currency_code
        )
        request.send()
class CustomVariable(object):
    """An immutable (index, value) pair for a GA custom variable slot.

    NOTE: the last line of the original definition was corrupted by a
    fused dataset-metadata row; this restores a clean definition.
    """

    def __init__(self, index, value):
        self.__index = index
        self.__value = value

    @property
    def index(self):
        """Slot index of the custom variable."""
        return self.__index

    @property
    def value(self):
        """Value stored in the slot."""
        return self.__value
"""
Project main settings file. These settings are common to the project
if you need to override something do it in local.pt
"""
from sys import path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# PATHS
# Path containing the django project
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path.append(BASE_DIR)
# Path of the top level directory.
# This directory contains the django project, apps, libs, etc...
PROJECT_ROOT = os.path.dirname(BASE_DIR)
# Add apps and libs to the PROJECT_ROOT
path.append(os.path.join(PROJECT_ROOT, "apps"))
path.append(os.path.join(PROJECT_ROOT, "libs"))
# SITE SETTINGS
# https://docs.djangoproject.com/en/1.10/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/1.10/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# https://docs.djangoproject.com/en/1.10/ref/settings/#installed-apps
INSTALLED_APPS = [
# Django apps
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.humanize',
'django.contrib.sitemaps',
'django.contrib.syndication',
'django.contrib.staticfiles',
# Third party apps
'compressor',
# Local apps
'base',
]
# https://docs.djangoproject.com/en/1.10/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
# DEBUG SETTINGS
# https://docs.djangoproject.com/en/1.10/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/1.10/ref/settings/#internal-ips
INTERNAL_IPS = ('127.0.0.1')
# LOCALE SETTINGS
# Local time zone for this installation.
# https://docs.djangoproject.com/en/1.10/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# https://docs.djangoproject.com/en/1.10/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/1.10/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/1.10/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/1.10/ref/settings/#use-tz
USE_TZ = True
# MEDIA AND STATIC SETTINGS
# Absolute filesystem path to the directory that will hold user-uploaded files.
# https://docs.djangoproject.com/en/1.10/ref/settings/#media-root
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'public/media')
# URL that handles the media served from MEDIA_ROOT. Use a trailing slash.
# https://docs.djangoproject.com/en/1.10/ref/settings/#media-url
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# https://docs.djangoproject.com/en/1.10/ref/settings/#static-root
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public/static')
# URL prefix for static files.
# https://docs.djangoproject.com/en/1.10/ref/settings/#static-url
STATIC_URL = '/static/'
# Additional locations of static files
# https://docs.djangoproject.com/en/1.10/ref/settings/#staticfiles-dirs
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# https://docs.djangoproject.com/en/1.10/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# TEMPLATE SETTINGS
# https://docs.djangoproject.com/en/1.10/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
# URL SETTINGS
# https://docs.djangoproject.com/en/1.10/ref/settings/#root-urlconf.
ROOT_URLCONF = 'shaping_templeat.urls'
# MIDDLEWARE SETTINGS
# See: https://docs.djangoproject.com/en/1.10/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# LOGGING
# https://docs.djangoproject.com/en/1.10/topics/logging/
LOGGING = {
'version': 1,
'loggers': {
'shaping_templeat': {
'level': "DEBUG"
}
}
}
| hamedtorky/django-starter-template | shaping_templeat/settings/base.py | Python | mit | 5,369 |
import sys
sys.path.insert(0, "../")
import unittest
from dip.typesystem import DNull, DBool, DInteger, DString, DList
from dip.compiler import BytecodeCompiler
from dip.interpreter import VirtualMachine
from dip.namespace import Namespace
class TestInterpreter(unittest.TestCase):
    """End-to-end VM tests: assemble a tiny bytecode program, run it on
    the VirtualMachine as ``main``, and check the returned value.

    The assembly strings use ``OP args # n`` where ``n`` is the
    instruction index; operands are indices into the ``data`` constant
    pool supplied alongside the code.
    """

    def _execute_simple(self, code, data):
        """Compile ``code`` with constant pool ``data`` as function
        ``main``, run it, and return the DObject the program RETs."""
        # The VM reports the program result through a callback, so capture
        # it in a one-element list the closure can write to.
        result = [None]
        def getresult(val):
            result[0] = val
        vm = VirtualMachine([], getresult)
        globalns = Namespace("globals")
        ctx = BytecodeCompiler("main", code, data, namespace=globalns)
        globalns.set_func("main", ctx.mkfunc())
        vm.setglobals(globalns)
        vm.run(pass_argv=False)
        return result[0]

    def test_add(self):
        # ADD: data2 = data0 + data1 -> 32 + 64 = 96
        result = self._execute_simple("""
        ADD 0 1 2 # 0
        RET 2 # 1
        """, [
            DInteger.new_int(32), # data0
            DInteger.new_int(64), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 96)

    def test_sub(self):
        # SUB: data2 = data0 - data1 -> 64 - 32 = 32
        result = self._execute_simple("""
        SUB 0 1 2 # 0
        RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(32), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 32)

    def test_mul(self):
        # MUL: data2 = data0 * data1 -> 64 * 32 = 2048
        result = self._execute_simple("""
        MUL 0 1 2 # 0
        RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(32), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 2048)

    def test_div(self):
        # DIV: data2 = data0 / data1 -> 64 / 2 = 32
        result = self._execute_simple("""
        DIV 0 1 2 # 0
        RET 2 # 1
        """, [
            DInteger.new_int(64), # data0
            DInteger.new_int(2), # data1
            DInteger(), # data2
        ])
        self.assertEqual(result.int_py(), 32)

    def test_jump(self):
        # JMP skips instruction 1, so the program returns data1, not data0.
        result = self._execute_simple("""
        JMP 2 # 0
        RET 0 # 1
        RET 1 # 2
        """, [
            DInteger.new_int(16), # data0
            DInteger.new_int(32), # data1
        ])
        self.assertEqual(result.int_py(), 32)

    def test_len(self):
        # LEN: data1 = len(data0) -> len("neat") = 4
        result = self._execute_simple("""
        LEN 0 1 # 0
        RET 1 # 1
        """, [
            DString.new_str("neat"), # data0
            DInteger(), # data1
        ])
        self.assertEqual(result.int_py(), 4)

    def test_eq(self):
        # EQ on unequal integers yields False...
        result = self._execute_simple("""
        EQ 0 1 2 # 0
        RET 2 # 1
        """, [
            DInteger.new_int(4), # data0
            DInteger.new_int(5), # data1
            DBool(), # data2
        ])
        self.assertEqual(result.int_py(), False)
        # ...and on equal strings yields True.
        result = self._execute_simple("""
        EQ 0 1 2 # 0
        RET 2 # 1
        """, [
            DString.new_str("neat"), # data0
            DString.new_str("neat"), # data1
            DBool(), # data2
        ])
        self.assertEqual(result.int_py(), True)

    def test_branch(self):
        # BF (branch-if-false) jumps past RET 0 because 4 != 5,
        # so the program returns data3.
        result = self._execute_simple("""
        EQ 0 1 2 # 0
        BF 2 3 # 1
        RET 0 # 2
        LABEL :some_label # 3
        RET 3 # 4
        """, [
            DInteger.new_int(4), # data0
            DInteger.new_int(5), # data1
            DBool(), # data2
            DInteger.new_int(999), # data3
        ])
        self.assertEqual(result.int_py(), 999)

    def test_lists(self):
        # Build a 3-element list, check its length, remove one element,
        # and check the length changed as expected.
        result = self._execute_simple("""
        LIST_NEW 0
        LIST_ADD 0 1 # 0 data0.append(data1)
        LIST_ADD 0 1 # 1 data0.append(data1)
        LIST_ADD 0 2 # 2 data0.append(data2)
        LEN 0 3 # 3 data3 = len(data0)
        EQ 3 5 6 # 4 data6 = (data3 == data5)
        LIST_REM 0 4 # 5 data0.remove(data4 (represents an index))
        LEN 0 3 # 6 data3 = len(data0)
        NEQ 3 5 7 # 7 data7 = (data3 != data5)
        EQ 6 7 8 # 8 data8 = (data6 == data7)
        RET 8 # 9 return data8
        """, [
            DList(), # data0, list
            DInteger.new_int(5), # data1, fake value to add to the list
            DString.new_str("hi"), # data2, fake value to add to the list
            DInteger(), # data3, list length
            DInteger.new_int(2), # data4, list index
            DInteger.new_int(3), # data5, expected list length
            DBool(), # data6, comp1
            DBool(), # data7, comp2
            DBool(), # data8, output
        ])
        self.assertEqual(result.int_py(), True)
# Allow running this test module directly.  BUG FIX: the original line was
# corrupted by a fused dataset-metadata row appended after unittest.main().
if __name__ == '__main__':
    unittest.main()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations(object):
"""VirtualNetworkPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the shared pipeline client and the
        # (de)serializers supplied by the service client that creates
        # this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the raw DELETE request that starts the long-running
        deletion; ``begin_delete`` wraps this with LRO polling."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = deletion accepted (async); 200/204 = already complete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified virtual network peering.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network peering.
        :type virtual_network_peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial DELETE when we are not resuming from a
        # saved continuation token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                virtual_network_peering_name=virtual_network_peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only honor a custom cls.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkPeering"
        """Gets the specified virtual network peering.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network peering.
        :type virtual_network_peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualNetworkPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        virtual_network_peering_parameters,  # type: "_models.VirtualNetworkPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VirtualNetworkPeering"
        """Issue the raw PUT that starts the long-running create/update;
        ``begin_create_or_update`` wraps this with LRO polling."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing peering, 201 = created a new one;
        # both carry a VirtualNetworkPeering body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        virtual_network_peering_name,  # type: str
        virtual_network_peering_parameters,  # type: "_models.VirtualNetworkPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkPeering"]
        """Creates or updates a peering in the specified virtual network.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the peering.
        :type virtual_network_peering_name: str
        :param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
         network peering operation.
        :type virtual_network_peering_parameters: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkPeering or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial PUT when we are not resuming from a saved
        # continuation token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                virtual_network_peering_name=virtual_network_peering_name,
                virtual_network_peering_parameters=virtual_network_peering_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The terminal response carries the resulting peering resource.
            deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        virtual_network_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkPeeringListResult"]
        """Gets all virtual network peerings in a virtual network.

        Returns a lazy pager: no request is made until the result is iterated,
        and subsequent pages are fetched on demand via the service-provided
        ``next_link``.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeeringListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkPeeringListResult"]
        # Status-code -> exception mapping; callers may extend/override it by
        # passing their own 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the GET request: the templated base URL for the first page,
            # or the opaque next_link URL for subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries the full URL including query string,
                # so no api-version parameter is added here.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items iterator).
            deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; anything but HTTP 200 raises (per error_map first).
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/_virtual_network_peerings_operations.py | Python | mit | 22,805 |
import sys

if __name__ == "__main__":
    # Parse command line arguments.
    if len(sys.argv) < 2:
        sys.exit("python {} <datasetFilename> {{<maxPoints>}}".format(sys.argv[0]))
    datasetFilename = sys.argv[1]
    # Optional cap on how many points are echoed to stdout.
    if len(sys.argv) >= 3:
        maxPoints = int(sys.argv[2])
    else:
        maxPoints = None
    # Initial pass through the file to determine the line count (i.e. # of points).
    with open(datasetFilename, "r") as f:
        lineCount = sum(1 for _ in f)
    # Use the first line to determine the dimensionality of each point.
    with open(datasetFilename, "r") as f:
        numDimensions = len(f.readline().split())
    # If dimensionality of dataset is 0, print error message and exit.
    if numDimensions == 0:
        sys.exit("Could not determine dimensionality of dataset")
    # Output the dataset header, which defines the dimensionality of the data
    # and the number of points that will follow.
    if maxPoints:
        numPoints = min(lineCount, maxPoints)
    else:
        numPoints = lineCount
    print("{} {}".format(numDimensions, numPoints))
    # Read the file line-by-line, normalising every field to float formatting
    # and printing each line as one point.
    with open(datasetFilename, "r") as f:
        pointsRead = 0
        for line in f:
            floatFields = [str(float(x)) for x in line.split()]
            print(" ".join(floatFields))
            # Stop reading the file if the maximum number of points have been read.
            pointsRead += 1
            if maxPoints and pointsRead >= maxPoints:
                break
"""Wechatkit exception module."""
class WechatKitBaseException(Exception):
    """Root of the wechatkit exception hierarchy.

    Besides behaving as a normal ``Exception``, the error description is
    kept on the ``error_info`` attribute for programmatic access.
    """

    def __init__(self, error_info):
        """Store *error_info* and forward it to ``Exception``."""
        super().__init__(error_info)
        self.error_info = error_info
class WechatKitException(WechatKitBaseException):
    """General-purpose wechatkit error; base for more specific failures."""
class WechatSignException(WechatKitException):
    """Raised when a WeChat signature check or computation fails."""
| istommao/wechatkit | wechatkit/exceptions.py | Python | mit | 437 |
# This module is part of the Divmod project and is Copyright 2003 Amir Bakhtiar:
# amir@divmod.org. This is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
from tkinter import *
import tkinter.filedialog
import tkinter.simpledialog
import tkinter.messagebox
import os
from .util import Command, StatusBar, Notebook
from .tester import TestView
class PoolView(Frame):
    """Pane listing every pool in the guesser with live training/token counts."""

    def __init__(self, master=None, guesser=None, app=None):
        Frame.__init__(self, master, bg='lightblue3')
        self.pack()
        self.listView = Frame(self)
        self.listView.pack()
        bp = Button(self, text="New Pool", command=self.newPool)
        bp.pack(side=LEFT, anchor=SE)
        self.addLoadSave()
        self.columnHeadings()
        # name -> (pool, uniqueTokenVar, totalTokenVar, trainedVar); the Tk
        # variables keep the displayed counts in sync via refresh().
        self.model = {}
        self.guesser = guesser
        self.app = app
        self.reload()

    def reload(self):
        """Throw away the list widget and rebuild one row per pool."""
        self.listView.destroy()
        self.listView = Frame(self)
        self.listView.pack()
        for pool in self.guesser.poolNames():
            self.addPool(self.guesser.pools[pool])
        # The corpus aggregates every pool; show it as the 'Total' row.
        self.addPool(self.guesser.corpus, 'Total')

    def upload(self):
        """Placeholder: the Upload button is created disabled."""
        pass

    def addLoadSave(self):
        """Create the Upload/Save/Load button column."""
        frame = Frame(self)
        frame.pack(side=RIGHT)
        bp = Button(frame, text="Upload", command=self.upload, state=DISABLED)
        bp.pack(side=BOTTOM, fill=X)
        bp = Button(frame, text="Save", command=self.save)
        bp.pack(side=BOTTOM, fill=X)
        bp = Button(frame, text="Load", command=self.load)
        bp.pack(side=BOTTOM, fill=X)

    def addPool(self, pool, name=None):
        """Append one display row for *pool*; *name* defaults to pool.name."""
        # Note: the original had dead duplicate IntVar() assignments for
        # tTok/train before the widgets were built; they have been removed.
        col = None
        line = Frame(self.listView)
        line.pack()
        if name is None:
            name = pool.name
            # Colour-code named pools by their position in the pool list.
            idx = self.guesser.poolNames().index(name)
            col = self.defaultColours()[idx]
        l = Label(line, text=name, anchor=W, width=10)
        l.grid(row=0, column=0)
        colourStripe = Label(line, text=' ', width=1, bg=col, anchor=W, relief=GROOVE)
        colourStripe.grid(row=0, column=1)
        train = IntVar()
        train.set(pool.trainCount)
        l = Label(line, textvariable=train, anchor=E, width=10, relief=SUNKEN)
        l.grid(row=0, column=2)
        uTok = IntVar()
        uTok.set(len(pool))
        l = Label(line, textvariable=uTok, anchor=E, width=12, relief=SUNKEN)
        l.grid(row=0, column=3)
        tTok = IntVar()
        tTok.set(pool.tokenCount)
        l = Label(line, textvariable=tTok, anchor=E, width=10, relief=SUNKEN)
        l.grid(row=0, column=4)
        self.model[name] = (pool, uTok, tTok, train)

    def refresh(self):
        """Push current pool statistics into the Tk display variables."""
        for pool, ut, tt, train in list(self.model.values()):
            ut.set(len(pool))
            tt.set(pool.tokenCount)
            train.set(pool.trainCount)

    def save(self):
        """Ask for a path and save the guesser state there."""
        path = tkinter.filedialog.asksaveasfilename()
        if not path:
            return
        self.guesser.save(path)
        self.app.dirty = False

    def load(self):
        """Ask for a path, load the guesser state and rebuild the view."""
        path = tkinter.filedialog.askopenfilename()
        if not path:
            return
        self.guesser.load(path)
        self.reload()
        self.app.dirty = False

    def newPool(self):
        """Prompt for a name and create a new (empty) pool."""
        p = tkinter.simpledialog.askstring('Create Pool', 'Name for new pool?')
        if not p:
            return
        if p in self.guesser.pools:
            tkinter.messagebox.showwarning('Bad pool name!', 'Pool %s already exists.' % p)
            # Bug fix: previously execution fell through after the warning and
            # re-created the existing pool anyway.
            return
        self.guesser.newPool(p)
        self.reload()
        self.app.poolAdded()
        self.app.status.log('New pool created: %s.' % p, clear=3)

    def defaultColours(self):
        """Palette used to colour-code pools, indexed by pool position."""
        return ['green', 'yellow', 'lightblue', 'red', 'blue', 'orange', 'purple', 'pink']

    def columnHeadings(self):
        # FIXME factor out and generalize
        title = Label(self, text='Pools', relief=RAISED, borderwidth=1)
        title.pack(side=TOP, fill=X)
        msgLine = Frame(self, relief=RAISED, borderwidth=1)
        msgLine.pack(side=TOP)
        currCol = 0
        colHeadings = [('Name', 10), ('', 1), ('Trained', 10), ('Unique Tokens', 12), ('Tokens', 10)]
        for cHdr, width in colHeadings:
            l = Label(msgLine, text=cHdr, width=width, bg='lightblue')
            l.grid(row=0, column=currCol)
            currCol += 1
class Trainer(Frame):
    """Top-level application frame: a notebook with Training, Testing and Quit tabs."""

    def __init__(self, parent, guesser=None, itemClass=None):
        self.status = StatusBar(parent)
        self.status.pack(side=BOTTOM, fill=X)
        Frame.__init__(self, parent)
        self.pack(side=TOP, fill=BOTH)
        self.itemsPerPage = 20
        # A fixed set of reusable row widgets; pages of items are loaded into them.
        self.rows = []
        for i in range(self.itemsPerPage):
            self.rows.append(ItemRow())
        self.items = []
        self.files = []
        self.cursor = 0  # index into self.files of the first row on screen
        self.dirty = False  # True when there are unsaved training changes
        if guesser is None:
            # Imported lazily so any compatible guesser can be injected instead.
            from reverend.thomas import Bayes
            self.guesser = Bayes()
        else:
            self.guesser = guesser
        if itemClass is None:
            self.itemClass = TextItem
        else:
            self.itemClass = itemClass
        for row in self.rows:
            row.summary.set('foo')
        self.initViews()

    def initViews(self):
        """Create the notebook and its Training/Testing/Quit screens."""
        self.nb = Notebook(self)
        frame2 = Frame(self.nb())
        self.poolView = PoolView(frame2, guesser=self.guesser, app=self)
        self.poolView.pack(side=TOP)
        self.listView = Canvas(frame2, relief=GROOVE)
        self.listView.pack(padx=3)
        bn = Button(self.listView, text="Load training", command=self.loadCorpus)
        bn.pack(side=RIGHT, anchor=NE, fill=X)
        self.columnHeadings()
        self.addNextPrev()
        frame3 = Frame(self.nb())
        self.testView = TestView(frame3, guesser=self.guesser, app=self)
        self.testView.pack()
        frame4 = Frame(self.nb())
        bp = Button(frame4, text="Quit", command=self.quitNow)
        bp.pack(side=BOTTOM)
        self.nb.add_screen(frame2, 'Training')
        self.nb.add_screen(frame3, 'Testing')
        self.nb.add_screen(frame4, 'Quit')

    def addNextPrev(self):
        """Add the paging buttons beneath the item list."""
        npFrame = Frame(self.listView)
        npFrame.pack(side=BOTTOM, fill=X)
        bn = Button(npFrame, text="Prev Page", command=self.prevPage)
        bn.grid(row=0, column=0)
        bn = Button(npFrame, text="Next Page", command=self.nextPage)
        bn.grid(row=0, column=1)

    def loadCorpus(self):
        """Ask for a directory and display its files as training items."""
        path = tkinter.filedialog.askdirectory()
        if not path:
            return
        self.loadFileList(path)
        self.displayItems()
        self.displayRows()

    def bulkTest(self):
        """Collect one directory per pool for a bulk test run (currently a stub)."""
        dirs = []
        for pool in self.guesser.poolNames():
            path = tkinter.filedialog.askdirectory()
            dirs.append((pool, path))
        for pool, path in dirs:
            print(pool, path)

    def displayList(self):
        # NOTE(review): self.itemRow is not defined anywhere visible in this
        # module; this method appears dead/broken -- confirm before using it.
        for item in self.items:
            self.itemRow(item)

    def displayRows(self):
        """Render every reusable row widget."""
        for row in self.rows:
            self.displayRow(row)

    def loadFileList(self, path):
        """Remember every file in *path* and reset paging to the start."""
        listing = os.listdir(path)
        self.files = [os.path.join(path, fname) for fname in listing]
        self.cursor = 0

    def prevPage(self):
        """Move the page cursor back one page and redisplay."""
        self.cursor = max(0, self.cursor - self.itemsPerPage)
        self.displayItems()

    def nextPage(self):
        """Move the page cursor forward one page and redisplay."""
        self.cursor = min(len(self.files), self.cursor + self.itemsPerPage)
        self.displayItems()

    def displayItems(self):
        """Load the current page of files into the row widgets and guess each."""
        theseFiles = self.files[self.cursor:self.cursor + self.itemsPerPage]
        items = []
        for fname, row in zip(theseFiles, self.rows):
            fp = open(fname, 'rb')
            try:
                item = self.itemClass.fromFile(fp)
            finally:
                fp.close()
            if item is None:
                continue
            items.append(item)
            guesses = self.guesser.guess(item)
            # Build the display summary from the item's declared columns.
            summary = item.summary()
            cols = item.columnDefs()
            s = ''
            for c, ignore in cols:
                s += summary[c] + ' '
            row.initialize(item, s, guesses, self.guesser.poolNames())
        self.items = items

    def quitNow(self):
        """Quit, first asking for confirmation if there are unsaved changes."""
        if self.dirty:
            # Bug fix: previously this method fell through and quit even when
            # the user answered "no" to the confirmation dialog.
            if tkinter.messagebox.askyesno("You have unsaved changes!", "Quit without saving?"):
                self.quit()
            return
        self.quit()

    def columnHeadings(self):
        # FIXME - Something better for columns and rows in general
        line = Frame(self.listView, relief=RAISED, borderwidth=1)
        line.pack(side=TOP, padx=2, pady=1)
        colHeadings = self.itemClass.columnDefs()
        currCol = 0
        for cHdr, width in colHeadings:
            l = Label(line, text=cHdr, width=width, bg='lightblue')
            l.grid(row=0, column=currCol)
            currCol += 1
        line = Frame(self)
        line.pack(fill=X)

    def training(self, row):
        """Train the guesser with the pool selected on *row*, then re-guess."""
        sel = row.selection.get()
        self.guesser.train(sel, row.original)
        row.current = sel
        self.guessAll()

    def guessAll(self):
        """Refresh pool statistics and re-run the guesser over every row."""
        self.poolView.refresh()
        pools = self.guesser.poolNames()
        for row in self.rows:
            row.setGuess(self.guesser.guess(row.original), pools)

    def displayRow(self, row, bgc=None):
        # UGH - REWRITE!
        line = Frame(self.listView, bg=bgc)
        line.pack(pady=1)
        row.line = line
        self.insertRadios(row)
        Label(line, text=row.summary.get(), textvariable=row.summary, width=60, bg=bgc,
              anchor=W).grid(row=0, column=2)
        colourStripe = Label(line, text=' ', width=1, bg=bgc, anchor=W, relief=GROOVE)
        colourStripe.grid(row=0, column=1)
        line.colourStripe = colourStripe
        pools = self.guesser.poolNames()
        row.refreshColour(pools)

    def poolAdded(self):
        """Rebuild each row's radio buttons after a new pool is created."""
        if not self.items:
            return
        pools = self.guesser.poolNames()
        for row in self.rows:
            for r in row.radios:
                r.destroy()
            self.insertRadios(row)
            row.refreshColour(pools)
        self.dirty = True

    def insertRadios(self, row):
        """Create one radio button per pool on *row*, wired to training()."""
        radioFrame = Frame(row.line)
        radioFrame.grid(row=0, column=0)
        currCol = 0
        radios = []
        v = row.selection
        pools = self.guesser.poolNames()
        for pool in pools:
            rb = Radiobutton(radioFrame, text=pool, variable=v, value=pool, command=Command(self.training, row), bg=None)
            rb.grid(row=0, column=currCol)
            radios.append(rb)
            currCol += 1
        row.radios = radios
class TextItem(object):
    """Wraps a single line of text for display and classification."""

    def __init__(self, text):
        self.text = text

    def summary(self):
        """Return the column-name to display-text mapping for this item."""
        return {'Text': self.text}

    def columnNames(self):
        """Names of the displayable columns for this item type."""
        return ['Text']

    def lower(self):
        """Lower-cased view of the text, used by the guesser for matching."""
        return self.text.lower()

    @classmethod
    def fromFile(cls, fp):
        """Build an item from the first line of the open file *fp*."""
        return cls(fp.readline())
class ItemRow(object):
    """State for one displayed row: the source item, its guess, and Tk variables."""

    def __init__(self, orig=None):
        self.line = None
        self.radios = []
        self.original = orig
        self.current = ''
        self.guess = []
        self.summary = StringVar()
        self.selection = StringVar()

    def initialize(self, item=None, summary='', guess=None, pools=[]):
        """Reset the row to show *item* with the given summary and guess."""
        self.selection.set('')
        self.original = item
        self.summary.set(summary)
        self.setGuess(guess, pools)

    def setGuess(self, guess, pools):
        """Store *guess* (falling back to a blank guess) and refresh the UI."""
        self.guess = guess if guess else [['']]
        best = self.bestGuess()
        self.selection.set(best)
        self.current = best
        self.refreshColour(pools)

    def refreshColour(self, pools):
        """Recolour the row's stripe to match the guessed pool, if any."""
        stripeColour = None
        top = self.guess[0][0]
        if top in pools:
            stripeColour = self.defaultColours()[pools.index(top)]
        if self.line:
            self.line.colourStripe.config(bg=stripeColour)

    def __repr__(self):
        return repr(self.original)

    def defaultColours(self):
        """Palette used to colour-code pools, indexed by pool position."""
        return ['green', 'yellow', 'lightblue', 'red', 'blue', 'orange', 'purple', 'pink']

    def bestGuess(self):
        """Name of the highest-ranked guess, or None when there is no guess."""
        return self.guess[0][0] if self.guess else None
if __name__ == "__main__":
    # Build the Tk root window and hand it to the Trainer application frame.
    root = Tk()
    root.title('Reverend Trainer')
    root.minsize(width=300, height=300)
    #root.maxsize(width=600, height=600)
    display = Trainer(root)
    # Enter the Tk event loop; blocks until the window is closed.
    root.mainloop()
| samuelclay/NewsBlur | vendor/reverend/ui/trainer.py | Python | mit | 13,036 |
from django.urls import path, include
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from tasks.views import TaskItemViewSet, MainAppView, TagViewSet, ProjectViewSet, TaskCommentViewSet

# Discover admin registrations in all installed apps.
admin.autodiscover()

# DRF router: generates the standard list/detail CRUD routes for each viewset.
router = DefaultRouter()
router.register(r'task', TaskItemViewSet)
router.register(r'tag', TagViewSet)
router.register(r'project', ProjectViewSet)
router.register(r'comments', TaskCommentViewSet)

urlpatterns = [
    path('admin/', admin.site.urls),
    # Main application entry point, reversible by the name "index".
    path('', MainAppView.as_view(), {}, "index"),
    # All router-generated API endpoints live under /api/.
    path('api/', include(router.urls)),
    path('rest-auth/', include('rest_auth.urls'))
]
| andreiavram/organizer | organizer/urls.py | Python | mit | 637 |
#!/usr/bin/env python3
"""Print the login name of every local user whose shell is bash."""
import pwd

for entry in pwd.getpwall():
    # pw_shell is the user's login shell field from the password database.
    if entry.pw_shell.endswith('/bin/bash'):
        print(entry.pw_name)
| thunderoy/dgplug_training | assignments/assign4.py | Python | mit | 121 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest.lookups import TwilioLookupsClient

try:
    # Python 3
    from urllib.parse import quote
except ImportError:
    # Python 2
    from urllib import quote

# Your Account Sid and Auth Token from twilio.com/console
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioLookupsClient(account_sid, auth_token)

# Percent-encode the number so the leading '+' survives in the request URL.
encoded_number = quote('+4402077651182')
# Look the number up and print it in its national dialling format.
number = client.phone_numbers.get(encoded_number)
print(number.national_format)
| teoreteetik/api-snippets | lookups/lookup-international-basic/lookup-international-basic.5.x.py | Python | mit | 563 |
'''
Created on Oct 19, 2016

@author: jaime
'''
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt

from products import views

# CSRF protection is disabled on these API-style endpoints.  The optional
# ``uid`` group selects a single category/product; without it the view
# operates on the whole collection.
urlpatterns = [
    url(r'^categories/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^categories/(?P<uid>\w+)/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^$', csrf_exempt(views.ProductView.as_view())),
    url(r'^(?P<uid>\w+)/$', csrf_exempt(views.ProductView.as_view())),
]
] | jroeland/teapot | project/web/app/products/urls.py | Python | mit | 489 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict

micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
# ``tags`` is injected by Sphinx when conf.py is executed; adding a
# "port_<name>" tag lets the docs use ".. only::" directives per port.
tags.add('port_' + micropy_port)
# Maps port id -> human-readable name used in page templates.
ports = OrderedDict((
    ('unix', 'unix'),
    ('pyboard', 'the pyboard'),
    ('wipy', 'the WiPy'),
    ('esp8266', 'the ESP8266'),
))

# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
# URL template: <prefix>/en/<version>/<port>
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
    'port':micropy_port,
    'port_name':ports[micropy_port],
    'port_version':micropy_version,
    # Cross-links to the same version of the docs for every other port.
    'all_ports':[
        (port_id, url_pattern % (micropy_version, port_id))
        for port_id, port_name in ports.items()
    ],
    # Cross-links to every published version of the docs for this port.
    'all_versions':[
        (ver, url_pattern % (ver, micropy_port))
        for ver in micropy_all_versions
    ],
    'downloads':[
        ('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
    ],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx_selective_exclude.modindex_exclude',
'sphinx_selective_exclude.eager_only',
'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2016, Damien P. George and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
release = '1.8.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # Bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing theme package should
        # trigger the fallback to the built-in default theme.
        html_theme = 'default'
        html_theme_path = ['.']
else:
    html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])
# Modules that only exist on a particular port.
modules_port_specific = {
    'pyboard': ['pyb'],
    'wipy': ['wipy'],
    'esp8266': ['esp'],
}

# Hide every module that belongs to a *different* port from the module index.
modindex_exclude = [
    mod
    for port, mods in modules_port_specific.items()
    if port != micropy_port
    for mod in mods
]

# Exclude extra modules per port
modindex_exclude += {
    'esp8266': ['cmath', 'select'],
    'wipy': ['cmath'],
}.get(micropy_port, [])
| turbinenreiter/micropython | docs/conf.py | Python | mit | 10,766 |
"""Modify Group Entry Message."""
from enum import IntEnum
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import (
FixedTypeList, Pad, UBInt8, UBInt16, UBInt32)
from pyof.v0x04.common.header import Header, Type
from pyof.v0x04.controller2switch.common import Bucket
__all__ = ('GroupMod', 'GroupModCommand', 'GroupType', 'Group',
'ListOfBuckets')
class Group(IntEnum):
    """Special group identifiers.

    Concrete groups may use any number up to :attr:`OFPG_MAX`; the values
    above it are reserved "fake" groups with special meaning.
    """

    #: Highest group number usable by a real group.
    OFPG_MAX = 0xffffff00
    #: Fake group: stands for all groups in group delete commands.
    OFPG_ALL = 0xfffffffc
    #: Fake group: wildcard used only for flow stats requests; selects all
    #: flows regardless of group (including flows with no group).
    OFPG_ANY = 0xffffffff
class GroupModCommand(IntEnum):
    """Commands carried by a group-mod message."""

    #: Create a new group.
    OFPGC_ADD = 0
    #: Modify every matching group.
    OFPGC_MODIFY = 1
    #: Delete every matching group.
    OFPGC_DELETE = 2
class GroupType(IntEnum):
    """Kinds of groups. The range [128, 255] is reserved for experimental use."""

    #: All (multicast/broadcast) group.
    OFPGT_ALL = 0
    #: Select group.
    OFPGT_SELECT = 1
    #: Indirect group.
    OFPGT_INDIRECT = 2
    #: Fast failover group.
    OFPGT_FF = 3
class ListOfBuckets(FixedTypeList):
    """Typed list that only accepts :class:`Bucket` elements."""

    def __init__(self, items=None):
        """Create the list, optionally pre-populated.

        Args:
            items (Bucket): Instance or a list of instances.
        """
        super().__init__(pyof_class=Bucket, items=items)
class GroupMod(GenericMessage):
    """Group setup and teardown (controller -> datapath).

    NOTE(review): the order of the class attributes below appears to define
    the on-wire layout of the message (handled by the pyof base classes) --
    do not reorder them; confirm against GenericMessage's implementation.
    """

    header = Header(message_type=Type.OFPT_GROUP_MOD)
    # One of OFPGC_* (see GroupModCommand).
    command = UBInt16(enum_ref=GroupModCommand)
    # One of OFPGT_* (see GroupType).
    group_type = UBInt8()
    #: Pad to 64 bits.
    pad = Pad(1)
    # Identifier of the group being added/modified/deleted.
    group_id = UBInt32()
    buckets = ListOfBuckets()

    def __init__(self, xid=None, command=None, group_type=None, group_id=None,
                 buckets=None):
        """Create a GroupMod with the optional parameters below.

        Args:
            xid (int): Header's transaction id. Defaults to random.
            command (GroupModCommand): One of OFPGC_*.
            group_type (GroupType): One of OFPGT_*.
            group_id (int): Group identifier.
            buckets (:class:`ListOfBuckets`): The length of the bucket
                array is inferred from the length field in the header.
        """
        super().__init__(xid)
        self.command = command
        self.group_type = group_type
        self.group_id = group_id
        self.buckets = buckets
| cemsbr/python-openflow | pyof/v0x04/controller2switch/group_mod.py | Python | mit | 2,734 |
##########################################
# Check examples/0_simple_echo.py before #
##########################################
from cspark.Updater import Updater
from cspark.EventTypeRouter import EventTypeRouter
from cspark.UpdateHandler import UpdateHandler
from cspark.SQLiteContextEngine import SQLiteContextEngine
from cspark.MessageResponse import MessageResponse
# Create the updater; access_token must be filled in with a Cisco Spark
# bot token before running this example.
updater = Updater(
    access_token="",
)
class RoomMentionsCounterUpdateHandler(UpdateHandler, SQLiteContextEngine):
    """
    Handler should process messages from user and response with answers.
    This class is inherited from UpdateHandler and SQLiteContextEngine.
    UpdateHandler gives you "self.send_response" to send answers.
    SQLiteContextEngine gives you "self.context" which is a dictionary-like
    stateful container. You can save your data there for future updates;
    it is persisted in SQLite.
    """
    def handle_update(self):
        # Increment a per-room counter kept in the persistent context.
        if 'counter' not in self.context.room:
            self.context.room['counter'] = 1
        else:
            self.context.room['counter'] += 1
        # Reply with the current count for this room.
        self.send_response(
            MessageResponse("Room counter: " + str(self.context.room['counter']))
        )
class Router(EventTypeRouter):
    """
    Router should decide which message should be processed by which handler.
    This router is inherited from EventTypeRouter which divides updates by
    their type. For example this router sets
    RoomMentionsCounterUpdateHandler for updates which are messages.
    """
    # Handler class used for every new-message update.
    new_message_handler = RoomMentionsCounterUpdateHandler
# Now we need to register router
updater.add_router(Router)
# And start "event loop" (blocks until interrupted)
updater.idle()
| Matvey-Kuk/cspark-python | examples/1_room_mentions_counter.py | Python | mit | 1,702 |
import phidl.geometry as pg
import gdsfactory as gf
from gdsfactory.component import Component
@gf.cell
def outline(elements, **kwargs) -> Component:
    """
    Returns Component containing the outlined polygon(s).

    wraps phidl.geometry.outline

    Creates an outline around all the polygons passed in the `elements`
    argument. `elements` may be a Device, Polygon, or list of Devices.

    Args:
        elements: Device(/Reference), list of Device(/Reference), or Polygon
            Polygons to outline or Device containing polygons to outline.

    Keyword Args:
        distance: int or float
            Distance to offset polygons. Positive values expand, negative shrink.
        precision: float
            Desired precision for rounding vertex coordinates.
        num_divisions: array-like[2] of int
            The number of divisions with which the geometry is divided into
            multiple rectangular regions. This allows for each region to be
            processed sequentially, which is more computationally efficient.
        join: {'miter', 'bevel', 'round'}
            Type of join used to create the offset polygon.
        tolerance: int or float
            For miter joints, this number must be at least 2 and it represents the
            maximal distance in multiples of offset between new vertices and their
            original position before beveling to avoid spikes at acute joints. For
            round joints, it indicates the curvature resolution in number of
            points per full circle.
        join_first: bool
            Join all paths before offsetting to avoid unnecessary joins in
            adjacent polygon sides.
        max_points: int
            The maximum number of vertices within the resulting polygon.
        open_ports: bool or float
            If not False, holes will be cut in the outline such that the Ports are
            not covered. If True, the holes will have the same width as the Ports.
            If a float, the holes will be widened by that value (useful for fully
            clearing the outline around the Ports for positive-tone processes).
        layer: int, array-like[2], or set
            Specific layer(s) to put polygon geometry on.
    """
    # Delegate the geometry work to phidl, then convert back to gdsfactory.
    return gf.read.from_phidl(component=pg.outline(elements, **kwargs))
def test_outline():
    """Outlining two overlapping ellipses yields the expected integer area."""
    ellipses = [
        gf.components.ellipse(radii=(6, 6)),
        gf.components.ellipse(radii=(10, 4)),
    ]
    outlined = outline(ellipses)
    assert int(outlined.area()) == 52
if __name__ == "__main__":
    # Demo: outline two overlapping ellipses and display the result.
    e1 = gf.components.ellipse(radii=(6, 6))
    e2 = gf.components.ellipse(radii=(10, 4))
    c = outline([e1, e2])
    c.show()
| gdsfactory/gdsfactory | gdsfactory/geometry/outline.py | Python | mit | 2,655 |
# -*- coding: utf-8 -*-
"""
mchem.postgres
~~~~~~~~~~~~~~
Functions to build and benchmark PostgreSQL database for comparison.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import logging
import time
import click
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
log = logging.getLogger(__name__)
# Start by creating the database and loading the chembl dump via the command line:
# createdb chembl
# psql chembl < chembl_19.pgdump.sql
@click.group()
@click.option('--db', '-d', default='mchem', envvar='MCHEM_POSTGRES_DB', help='PostgreSQL database name (default: mchem).')
@click.option('--user', '-u', default='root', envvar='MCHEM_POSTGRES_USER', help='PostgreSQL username (default: root).')
@click.option('--password', '-p', default=None, envvar='MCHEM_POSTGRES_PASSWORD', help='PostgreSQL password.')
@click.option('--verbose', '-v', is_flag=True, help='Verbose debug logging.')
@click.help_option('--help', '-h')
@click.pass_context
def cli(ctx, db, user, password, verbose):
    """PostgreSQL command line interface."""
    click.echo('Connecting %s@%s' % (user, db))
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO, format='%(levelname)s: %(message)s')
    # The open connection is passed to subcommands via click's context object.
    ctx.obj = psycopg2.connect(database=db, user=user, password=password)
@cli.command()
@click.pass_obj
def load(conn):
    """Build PostgreSQL database.

    WARNING: destructive — drops several ChEMBL tables, then builds RDKit
    molecule and fingerprint tables (schema ``rdk``) with GiST indexes.
    Statement order matters: mols must exist before fps is derived from it,
    and ``rdkit.morgan_fp_size`` is changed between the bit-vector updates.
    """
    cur = conn.cursor()
    cur.execute('create extension if not exists rdkit;')
    cur.execute('create schema rdk;')
    cur.execute('drop table if exists biotherapeutics, drug_mechanism, activities, assays, assay_parameters, compound_records, compound_properties, molecule_hierarchy, ligand_eff, predicted_binding_domains, molecule_synonyms, docs, formulations, molecule_atc_classification cascade;')
    cur.execute('select * into rdk.mols from (select molregno,mol_from_ctab(molfile::cstring) m from compound_structures) tmp where m is not null;')
    cur.execute('create index molidx on rdk.mols using gist(m);')
    cur.execute('alter table rdk.mols add primary key (molregno);')
    cur.execute('select molregno, m into rdk.fps from rdk.mols;')
    cur.execute('alter table rdk.fps add column m2l512 bfp;')
    cur.execute('alter table rdk.fps add column m2l2048 bfp;')
    cur.execute('alter table rdk.fps add column m2 sfp;')
    cur.execute('alter table rdk.fps add column m3 sfp;')
    cur.execute('update rdk.fps set m2 = morgan_fp(m);')
    cur.execute('update rdk.fps set m3 = morgan_fp(m, 3);')
    # Bit-vector fingerprints at two sizes; the GUC must be set before each.
    cur.execute('set rdkit.morgan_fp_size=2048;')
    cur.execute('update rdk.fps set m2l2048 = morganbv_fp(m);')
    cur.execute('set rdkit.morgan_fp_size=512;')
    cur.execute('update rdk.fps set m2l512 = morganbv_fp(m);')
    cur.execute('alter table rdk.fps drop column m;')
    cur.execute('create index fps_m2_idx on rdk.fps using gist(m2);')
    cur.execute('create index fps_m3_idx on rdk.fps using gist(m3);')
    cur.execute('create index fps_m2l2048_idx on rdk.fps using gist(m2l2048);')
    cur.execute('create index fps_m2l512_idx on rdk.fps using gist(m2l512);')
    cur.execute('alter table rdk.fps add primary key (molregno);')
    conn.commit()
    cur.close()
    conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', '-f', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', '-t', default=0.8, help='Tanimoto threshold (default: 0.8).')
@click.pass_obj
def profile(conn, sample, fp, threshold):
    """Benchmark similarity searches for up to 100 sample molecules.

    Logs the mean/median wall-clock time per query at the given Tanimoto
    threshold and fingerprint column.
    """
    cur = conn.cursor()
    mol_ids = sample.read().strip().split('\n')
    times = []
    cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
    for i, mol_id in enumerate(mol_ids[:100]):
        log.debug('Query molecule %s of %s: %s' % (i+1, len(mol_ids), mol_id))
        # ARGH! The CHEMBL ID vs. molregno thing is a nightmare
        cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
        molregno = cur.fetchone()[0]
        #cur.execute("select m from rdk.mols where molregno = %s", (molregno,))
        #smiles = cur.fetchone()[0]
        # AsIs interpolates the column name unquoted; safe here because fp
        # is restricted by click.Choice above.
        cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
        qfp = cur.fetchone()[0]
        log.debug(mol_id)
        start = time.time()
        cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
        #cur.execute("select molregno from rdk.fps where %s%%morganbv_fp(%s)", (fp, smiles,)) # using smiles
        results = cur.fetchall()
        end = time.time()
        times.append(end - start)
    # Save results
    result = {
        'median_time': np.median(times),
        'mean_time': np.mean(times),
        'fp': fp,
        'threshold': threshold
    }
    log.info(result)
    cur.close()
    conn.close()
@cli.command()
@click.option('--sample', '-s', type=click.File('r'), help='File containing sample ids.')
@click.option('--fp', default='m2', type=click.Choice(['m2', 'm3', 'm2l2048', 'm2l512', 'm3l2048', 'm3l512']), help='Fingerprint type (default: m2).')
@click.option('--threshold', default=0.8, help='Similarity search threshold (default 0.8).')
@click.pass_obj
def samplesim(conn, sample, threshold, fp):
    """Perform a similarity search on every molecule in sample and print results."""
    click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))
    cur = conn.cursor()
    mol_ids = sample.read().strip().split('\n')
    cur.execute("set rdkit.tanimoto_threshold=%s;", (threshold,))
    for i, mol_id in enumerate(mol_ids[:100]):
        click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))
        # Translate the external ChEMBL id to the internal molregno key.
        cur.execute("select entity_id from chembl_id_lookup where chembl_id = %s", (mol_id,))
        molregno = cur.fetchone()[0]
        cur.execute("select %s from rdk.fps where molregno = %s", (AsIs(fp), molregno,))
        qfp = cur.fetchone()[0]
        # '%%' escapes the RDKit similarity operator '%' in the query string.
        cur.execute("select molregno from rdk.fps where %s%%%s", (AsIs(fp), qfp,))
        results = [r[0] for r in cur.fetchall()]
        chembl_ids = []
        for mrn in results:
            cur.execute("select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'", (mrn,))
            chembl_ids.append(cur.fetchone()[0])
        click.echo(chembl_ids)
    cur.close()
    conn.close()
| mcs07/mongodb-chemistry | mchem/postgres.py | Python | mit | 6,525 |
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``bar.uirevision`` property (auto-generated plotly code)."""

    def __init__(self, plotly_name="uirevision", parent_name="bar", **kwargs):
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # 'none' means changing this property triggers no re-render step.
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/_uirevision.py | Python | mit | 398 |
import pyaudio
import wave

# Record RECORD_SECONDS of mono audio and print each captured chunk.
# Reading CHUNK=1 frame at a time is deliberately extreme here (the common
# value, 1024, is kept commented out) — presumably for per-sample debugging.
#CHUNK = 1024
CHUNK = 1
FORMAT = pyaudio.paInt16
#CHANNELS = 2
CHANNELS = 1
#RATE = 44100
# NOTE(review): 10025 Hz is an unusual rate — possibly a typo of the
# standard 11025 Hz; confirm against the capture hardware.
RATE = 10025
RECORD_SECONDS = 5
# Output filename is defined but never written in this script.
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
print("* recording, CHUNK=%d" % CHUNK)
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    print('data=%s, len=%d' % (str(data), len(data)))
#    print(str(data))
#    print('%d' % ord(data))
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
| rzzzwilson/morse | morse/test.py | Python | mit | 682 |
def is_tl(data):
    """Return True if *data* is a tuple or a list.

    Uses a single isinstance() call with a tuple of types instead of two
    separate checks joined by ``or`` (idiomatic and marginally faster).
    """
    return isinstance(data, (tuple, list))
def get_depth(data):
    '''
    :type data: list or tuple (anything else counts as a scalar)
    :rtype: int

    Return the nesting depth of data:
    'x' is 0
    ['x', 'y'] is 1
    ['x', ['y', 'z']] is 2
    '''
    if is_tl(data):
        # Depth is one more than the deepest element.  As in the original,
        # an empty list/tuple raises ValueError (max of empty sequence).
        return max(1 + get_depth(item) for item in data)
    return 0
def reduce_d2(a, b):
    '''
    Generate all string concatenations of elements from a and b.

    Scalars are promoted to one-element lists, so ``reduce_d2('x', 'y')``
    returns ``['xy']``.

    :rtype: list of str
    '''
    if not is_tl(a):
        a = [a]
    if not is_tl(b):
        b = [b]
    # Nested comprehension replaces the manual double loop with append.
    return ['%s%s' % (i, j) for i in a for j in b]
def _generate_d2(data):
    '''
    Fold a depth<=2 structure into the list of all combinations.

    :rtype: list of str
    '''
    # ``reduce`` is not a builtin in Python 3; importing it from functools
    # works on both Python 2.6+ and Python 3.
    from functools import reduce
    return reduce(reduce_d2, data)
def _generate(data):
    '''
    Recursively flatten *data* into a flat list of combination strings.

    Structures deeper than 2 are first reduced one level element-wise,
    then reprocessed; depth-2 structures are combined directly; depth-1
    lists pass through; scalars become a single-element string list.
    '''
    depth = get_depth(data)
    if depth > 2:
        reduced = [_generate(element) for element in data]
        return _generate(reduced)
    if depth == 2:
        return _generate_d2(data)
    if depth == 1:
        return data
    return [str(data)]
def generate(data):
    '''
    :rtype: list of str
    :type data: list or tuple

    Generate the final result: every combination string from the nested
    input structure.
    '''
    result = _generate(data)
    # fix if initial data's depth == 1: _generate returns depth-1 input
    # unchanged, so force one combination pass in that case.
    if result == data:
        result = _generate_d2(data)
    return result
if __name__ == '__main__':
    # NOTE(review): this demo relies on range() returning a list
    # (Python 2 behavior); under Python 3 a range object would be treated
    # as a scalar by is_tl() — confirm the intended interpreter.
    nested = [range(2), [range(3), range(4)]]
    print(generate(nested))
    print(generate([1, [2, 3]]))
    print(generate([1, 2]))
    print(generate(1))
| Revolution1/ID_generator | generator.py | Python | mit | 1,559 |
import ctypes
import random
import math
import numpy
import stats
import constants
#energy = mesh.bandstructure.random_
# Load the compiled C drift/scatter routines; path is relative to the CWD.
ext = ctypes.cdll.LoadLibrary("c_optimized/driftscatter.so")
def randomElectronMovement(particle,electric_field,density_func,mesh,reaper):
    """Advance *particle* by one time step under the drift force.

    Updates particle.momentum from the drift force, then moves the
    particle by momentum*dt/mass (scaled by mesh.length_scale).
    density_func, reaper and the scatter/lifetime logic are currently
    unused (kept commented out below).
    """
#    global avg_lifetime
#    lifetime = .001#lifetime(cell)
    p = particle
    dt = mesh.dt
    # Semi-implicit update: momentum first, then position from the new momentum.
    p.momentum += drift(mesh,electric_field,p)*dt
    dx = (p.momentum*dt/mesh.length_scale)/p.mass
    #stats.avg_dx += numpy.linalg.norm(dx)
    #stats.avg_momentum += numpy.linalg.norm(p.momentum)
    #p.dx += dx
    p.pos += dx
#    scatter(mesh,particle)
    #check for out of bounds
    # this needs to be changed, we should
    # do all momentum changes BEFORE movement
    # this guarantees that we are on the mesh.
    #if(dot(e.dx,e.dx) > meanpathlength**2):
    #    e.dx = array([0.,0.])
    #    e.momentum += array([0,0])#scatter(e.momentum,e.pos,mesh)
    #if(p.lifetime < 0):
    #    p.dead = True
    #    reaper.append(p.part_id)
    #print e.momentum
    #p.lifetime -= dt
def scatter(mesh, particle):
    """With 1% probability, randomize the direction of particle's momentum.

    The momentum magnitude is preserved; a new 2D direction is drawn
    uniformly in [0, 2*pi). ``mesh`` is currently unused.
    """
    scatter_type = random.random()
    if scatter_type < .01:
        # Parenthesized print is valid in both Python 2 and Python 3
        # (the original bare ``print "scatter"`` was Python-2-only).
        print("scatter")
        mag = numpy.sqrt(numpy.dot(particle.momentum, particle.momentum))
        theta = random.random() * 2 * numpy.pi
        # NOTE(review): this replaces a numpy-array momentum with a plain
        # tuple — presumably downstream code accepts either; confirm.
        particle.momentum = mag * numpy.cos(theta), mag * numpy.sin(theta)
def drift(mesh,func,particle):
    """Return the drift force on *particle* from the field lookup *func*.

    func is indexed by particle.id; the result is scaled by charge,
    mesh.charge_particle and mesh.length_scale.
    NOTE(review): ``p.charge/10`` floors to an integer under Python 2 if
    charge is an int — confirm whether true division is intended.
    """
    p = particle
    force = func[p.id]*(p.charge/10)*mesh.charge_particle/mesh.length_scale #self force?
    #stats.avg_force += numpy.linalg.norm(force)
    return force
| cwgreene/Nanostructure-Simulator | driftscatter.py | Python | mit | 1,432 |
"""
Harshad Number implementation
See: http://en.wikipedia.org/wiki/Harshad_number
"""
def is_harshad(n):
    """Return True if *n* is a Harshad number, i.e. divisible by its digit sum.

    Bug fix: the original destroyed ``n`` while summing its digits and then
    tested ``n % result == 0`` with ``n == 0``, which is True for every
    positive input (so it never rejected anything). The digit sum is now
    accumulated on a working copy and the original value is tested.
    Raises ZeroDivisionError for n == 0, as the original did.
    """
    digit_sum = 0
    m = n
    while m:
        digit_sum += m % 10
        m //= 10
    return n % digit_sum == 0
def main():
    """Verify is_harshad() accepts a known list of Harshad numbers."""
    # known contains a set of harshad numbers
    known = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             12, 18, 20, 21, 24, 27, 30, 36,
             40, 42, 45, 48, 50, 54, 60, 63,
             70, 72, 80, 81, 84, 90, 100, 102,
             108, 110, 111, 112, 114, 117, 120,
             126, 132, 133, 135, 140, 144, 150,
             152, 153, 156, 162, 171, 180, 190,
             192, 195, 198, 200, 201]
    # all() short-circuits on the first failure, like the original break.
    passed = all(is_harshad(number) for number in known)
    print("The test was", "Successful" if passed else "Unsuccessful!")


if __name__ == '__main__':
    main()
| kennyledet/Algorithm-Implementations | 10_Harshad_Number/Python/wasi0013/HarshadNumber.py | Python | mit | 856 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
extract FileInfo object for local files
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import os
from sync_app.util import get_md5, get_sha1
from sync_app.file_info import FileInfo, StatTuple
_pool = ProcessPoolExecutor(max_workers=mp.cpu_count())
class FileInfoLocal(FileInfo):
    """ File Info Local: FileInfo for files on the local filesystem. """

    def __init__(self, fn='', md5='', sha1='', fs=None, in_tuple=None):
        """ Init function

        Raises TypeError if *fn* is given but is not an existing file.
        The URL is derived from the absolute path as ``file://<abspath>``.
        """
        absfn = ''
        _url = ''
        if fn:
            absfn = os.path.abspath(fn)
            if not os.path.isfile(absfn):
                print('ERROR')
                raise TypeError
            _url = 'file://%s' % absfn
        FileInfo.__init__(self, fn=absfn, url=_url, md5=md5, sha1=sha1, fs=fs, in_tuple=in_tuple)

    def get_md5(self):
        """ Wrapper around sync_utils.get_md5

        NOTE(review): returns a concurrent.futures.Future when the file
        exists, but the cached md5sum string when it does not — callers
        presumably handle both; confirm.
        """
        if os.path.exists(self.filename):
            return _pool.submit(get_md5, self.filename)
        else:
            return self.md5sum

    def get_sha1(self):
        """ Wrapper around sync_utils.get_sha1

        Same Future-vs-string return asymmetry as get_md5 (see note there).
        """
        if os.path.exists(self.filename):
            return _pool.submit(get_sha1, self.filename)
        else:
            return self.sha1sum

    def get_stat(self):
        """ Wrapper around os.stat; caches the result on self.filestat.

        If the file no longer exists, the previous filestat is returned
        unchanged.
        """
        if os.path.exists(self.filename):
            self.filestat = StatTuple(os.stat(self.filename))
        return self.filestat
def test_file_info_local():
    """ Test FileInfoLocal: constructor error path and repr formatting. """
    from nose.tools import raises

    @raises(TypeError)
    def test_tmp():
        """ Constructing with a nonexistent filename must raise TypeError. """
        FileInfoLocal(fn='apsodfij')
    test_tmp()
    from sync_app.file_info import StatTuple
    test_dict = {'st_mtime': 1234567, 'st_size': 7654321}
    fs_ = StatTuple(**test_dict)
    fn_ = 'tests/test_dir/hello_world.txt'
    afn_ = os.path.abspath(fn_)
    # The repr must embed the absolute path, hashes and the stat size.
    tmp = '%s' % FileInfoLocal(
        fn=fn_,
        md5='8ddd8be4b179a529afa5f2ffae4b9858',
        sha1='a0b65939670bc2c010f4d5d6a0b3e4e4590fb92b',
        fs=fs_)
    test = '<FileInfo(fn=%s, ' % afn_ + 'url=file://%s, ' % afn_ + \
           'md5=8ddd8be4b179a529afa5f2ffae4b9858, ' \
           'sha1=a0b65939670bc2c010f4d5d6a0b3e4e4590fb92b, size=7654321)>'
    print(tmp)
    print(test)
    assert tmp == test
| ddboline/sync_app | sync_app/file_info_local.py | Python | mit | 2,404 |
# -*- coding: utf-8 -*-
"""
rdd.exceptions
~~~~~~~~~~~~~~
This module contains the exceptions raised by rdd.
"""
from requests.exceptions import *
class ReadabilityException(RuntimeError):
    """Base class for Readability exceptions."""


class ShortenerError(ReadabilityException):
    """Failed to shorten URL."""


class MetadataError(ReadabilityException):
    """Failed to retrieve metadata."""
| mlafeldt/rdd.py | rdd/exceptions.py | Python | mit | 407 |
from unittest import TestCase
from mangopi.site.mangapanda import MangaPanda
class TestMangaPanda(TestCase):
    """Integration tests against the live MangaPanda site.

    NOTE: SERIES/CHAPTERS are fetched at class-definition (import) time,
    so importing this module performs network requests, and the hard-coded
    counts/titles can break whenever the site's catalogue changes.
    """
    SERIES = MangaPanda.series('gantz')
    CHAPTERS = SERIES.chapters

    def test_chapter_count(self):
        self.assertEqual(len(TestMangaPanda.SERIES.chapters), 383)

    def test_chapter_title(self):
        self.assertEqual(TestMangaPanda.CHAPTERS[-2].title, 'Lightning Counterstrike')

    def test_chapter_pages(self):
        self.assertEqual(len(TestMangaPanda.CHAPTERS[0].pages), 43)

    def test_for_image_url(self):
        url = TestMangaPanda.CHAPTERS[0].pages[0].image.url
        self.assertTrue(len(url) > 0)
        self.assertEqual(url[:7], 'http://')
| jiaweihli/mangopi | mangopi/tests/site/test_mangaPanda.py | Python | mit | 688 |
# coding=utf-8
# main codes, call functions at stokes_flow.py
# Zhang Ji, 20160410
import sys
import petsc4py
petsc4py.init(sys.argv)
# import warnings
# from memory_profiler import profile
import numpy as np
from src import stokes_flow as sf
# import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from petsc4py import PETSc
from src.geo import *
from time import time
import pickle
from scipy.io import savemat, loadmat
from src.ref_solution import *
# @profile
def view_matrix(m, **kwargs):
    """Display matrix *m* with matplotlib's matshow (blocking).

    Keyword Args:
        vmin, vmax: color-scale limits (default: autoscale).
        title: figure title (default: ' ').
        cmap: matplotlib colormap name (default: rcParams default).

    Unrecognized keyword arguments are silently ignored, matching the
    original behavior.
    """
    args = {
        'vmin': None,
        'vmax': None,
        'title': ' ',
        'cmap': None
    }
    # Only recognized keys may be overridden (iterate keys, not items:
    # the values of the defaults dict were never used).
    for key in args:
        if key in kwargs:
            args[key] = kwargs[key]
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cax = ax.matshow(m,
                     origin='lower',
                     vmin=args['vmin'],
                     vmax=args['vmax'],
                     cmap=plt.get_cmap(args['cmap']))
    fig.colorbar(cax)
    plt.title(args['title'])
    plt.show()
def save_vtk(problem: sf.StokesFlowProblem):
    """Write VTK check files for the problem and return per-radius errors.

    For each radius factor in ``problem_kwargs['r_factor']`` (default: a
    single factor of 1), builds a 2000-node check sphere rotated by pi/2
    about the y axis and records the vtk_check error against the analytic
    sphere solution.
    """
    t0 = time()
    ref_slt = sphere_slt(problem)
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()  # NOTE(review): rank is unused here
    problem_kwargs = problem.get_kwargs()
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    u = problem_kwargs['u']
    sphere_err = 0  # overwritten below by r_factor.copy()
    # problem.vtk_obj(fileHandle)
    # problem.vtk_velocity('%s_Velocity' % fileHandle)
    # problem.vtk_self(fileHandle)
    theta = np.pi / 2
    sphere_check = sf.StokesFlowObj()
    sphere_geo_check = sphere_geo()  # force geo
    if not 'r_factor' in problem_kwargs:
        r_factor = np.ones(1)
    else:
        r_factor = problem_kwargs['r_factor']
    sphere_err = r_factor.copy()
    for i0, d0 in enumerate(r_factor):
        sphere_geo_check.create_n(2000, radius * d0)
        sphere_geo_check.set_rigid_velocity([u, 0, 0, 0, 0, 0])
        sphere_geo_check.node_rotation(norm=np.array([0, 1, 0]), theta=theta)
        sphere_check.set_data(sphere_geo_check, sphere_geo_check)
        sphere_err[i0] = problem.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), sphere_check, ref_slt)[0]
    t1 = time()
    PETSc.Sys.Print('%s: write vtk files use: %fs' % (str(problem), (t1 - t0)))
    return sphere_err
def get_problem_kwargs(**main_kwargs):
    """Collect runtime options from the PETSc option database into a dict.

    Reads command-line/-style options (radius, discretization, solver and
    matrix-method settings, object grid layout, restart flags, ...) with
    defaults, then lets entries of *main_kwargs* override anything read.
    Returns the combined problem_kwargs dict consumed by the *_fun drivers.
    """
    OptDB = PETSc.Options()
    radius = OptDB.getReal('r', 1)
    deltaLength = OptDB.getReal('d', 0.3)
    epsilon = OptDB.getReal('e', 0.3)
    u = OptDB.getReal('u', 1)
    fileHandle = OptDB.getString('f', 'sphere')
    solve_method = OptDB.getString('s', 'gmres')
    precondition_method = OptDB.getString('g', 'none')
    plot_geo = OptDB.getBool('plot_geo', False)
    debug_mode = OptDB.getBool('debug', False)
    matrix_method = OptDB.getString('sm', 'rs')
    restart = OptDB.getBool('restart', False)
    twoPara_n = OptDB.getInt('tp_n', 1)
    legendre_m = OptDB.getInt('legendre_m', 3)
    legendre_k = OptDB.getInt('legendre_k', 2)
    n_sphere_check = OptDB.getInt('n_sphere_check', 2000)
    n_node_threshold = OptDB.getInt('n_threshold', 10000)
    random_velocity = OptDB.getBool('random_velocity', False)
    getConvergenceHistory = OptDB.getBool('getConvergenceHistory', False)
    pickProblem = OptDB.getBool('pickProblem', False)
    prb_index = OptDB.getInt('prb_index', -1)
    n_obj = OptDB.getInt('n', 1)
    n_obj_x = OptDB.getInt('nx', n_obj)
    n_obj_y = OptDB.getInt('ny', n_obj)
    distance = OptDB.getReal('dist', 3)
    distance_x = OptDB.getReal('distx', distance)
    distance_y = OptDB.getReal('disty', distance)
    move_delta = np.array([distance_x, distance_y, 1])
    # field_range: describe a sector area.
    field_range = np.array([[-3, -3, -3], [n_obj_x - 1, n_obj_y - 1, 0] * move_delta + [3, 3, 3]])
    n_grid = np.array([n_obj_x, n_obj_y, 1]) * 20
    problem_kwargs = {
        'name':                  'spherePrb',
        'matrix_method':         matrix_method,
        'deltaLength':           deltaLength,
        'epsilon':               epsilon,
        'delta':                 deltaLength * epsilon,  # for rs method
        'd_radia':               deltaLength / 2,  # for sf method
        'solve_method':          solve_method,
        'precondition_method':   precondition_method,
        'field_range':           field_range,
        'n_grid':                n_grid,
        'plot_geo':              plot_geo,
        'debug_mode':            debug_mode,
        'fileHandle':            fileHandle,
        'region_type':           'rectangle',
        'twoPara_n':             twoPara_n,
        'legendre_m':            legendre_m,
        'legendre_k':            legendre_k,
        'radius':                radius,
        'u':                     u,
        'random_velocity':       random_velocity,
        'n_obj_x':               n_obj_x,
        'n_obj_y':               n_obj_y,
        'move_delta':            move_delta,
        'restart':               restart,
        'n_sphere_check':        n_sphere_check,
        'n_node_threshold':      n_node_threshold,
        'getConvergenceHistory': getConvergenceHistory,
        'pickProblem':           pickProblem,
        'prb_index':             prb_index,
    }
    # main_kwargs entries win over anything read from the option database.
    for key in main_kwargs:
        problem_kwargs[key] = main_kwargs[key]
    return problem_kwargs
def print_case_info(**problem_kwargs):
    """Print (rank-0, via PETSc.Sys.Print) a summary of the case settings.

    Also asserts that the chosen matrix_method is one of the supported
    variants for this driver.
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()  # NOTE(review): rank is unused here
    size = comm.Get_size()
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    matrix_method = problem_kwargs['matrix_method']
    u = problem_kwargs['u']
    PETSc.Sys.Print('sphere radius: %f, delta length: %f, velocity: %f' % (radius, deltaLength, u))
    err_msg = "Only 'pf', 'rs', 'tp_rs', and 'lg_rs' methods are accept for this main code. "
    assert matrix_method in (
        'rs', 'rs_plane', 'tp_rs', 'lg_rs', 'rs_precondition', 'tp_rs_precondition', 'lg_rs_precondition',
        'pf'), err_msg
    epsilon = problem_kwargs['epsilon']
    if matrix_method in ('rs', 'rs_plane', 'rs_precondition', 'pf'):
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f'
                        % (matrix_method, epsilon))
    elif matrix_method in ('tp_rs', 'tp_rs_precondition'):
        twoPara_n = problem_kwargs['twoPara_n']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, order: %d'
                        % (matrix_method, epsilon, twoPara_n))
    elif matrix_method in ('lg_rs', 'lg_rs_precondition'):
        legendre_m = problem_kwargs['legendre_m']
        legendre_k = problem_kwargs['legendre_k']
        PETSc.Sys.Print('create matrix method: %s, epsilon: %f, m: %d, k: %d, p: %d'
                        % (matrix_method, epsilon, legendre_m, legendre_k, (legendre_m + 2 * legendre_k + 1)))
    solve_method = problem_kwargs['solve_method']
    precondition_method = problem_kwargs['precondition_method']
    PETSc.Sys.Print('solve method: %s, precondition method: %s'
                    % (solve_method, precondition_method))
    PETSc.Sys.Print('output file headle: ' + fileHandle)
    PETSc.Sys.Print('MPI size: %d' % size)
# @profile
def main_fun(**main_kwargs):
    """Build and solve the multi-sphere Stokes flow problem.

    Either constructs an (n_obj_x x n_obj_y) grid of spheres and solves,
    or (with -restart) reloads a pickled problem from disk.
    Returns (problem, sphere_err); sphere_err is currently always 0
    because the save_vtk() call below is commented out.
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    restart = problem_kwargs['restart']
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    epsilon = problem_kwargs['epsilon']
    u = problem_kwargs['u']
    matrix_method = problem_kwargs['matrix_method']
    n_obj_x = problem_kwargs['n_obj_x']
    n_obj_y = problem_kwargs['n_obj_y']
    move_delta = problem_kwargs['move_delta']
    random_velocity = problem_kwargs['random_velocity']
    getConvergenceHistory = problem_kwargs['getConvergenceHistory']
    pickProblem = problem_kwargs['pickProblem']
    if not restart:
        print_case_info(**problem_kwargs)
        sphere_geo0 = sphere_geo()  # force geo
        sphere_geo0.create_delta(deltaLength, radius)
        # # DBG
        # nodes = ((0.17389, 0.2938, 0.37454),
        # (0.76774, 0.87325, 0.50809),
        # (0.17557, 0.82348, 0.7485),
        # (0.50734, 0.99882, 0.39992))
        # sphere_geo0.set_nodes(nodes=nodes, deltalength=deltaLength)
        if random_velocity:
            sphere_velocity = np.random.sample(6) * u
        else:
            sphere_velocity = np.array([0, u, 0, 0, 0, 0])
        sphere_geo0.set_rigid_velocity(sphere_velocity)
        problem = problem_dic[matrix_method](**problem_kwargs)
        if pickProblem:
            problem.pickmyself(fileHandle,
                               ifcheck=True)  # not save anything really, just check if the path is correct, to avoid this error after long time calculation.
        obj_sphere = obj_dic[matrix_method]()
        obj_sphere_kwargs = {'name': 'sphereObj_0_0'}
        sphere_geo1 = sphere_geo0.copy()
        if matrix_method in ('pf',):
            # For the point-force method the force geometry is slightly inflated.
            sphere_geo1.node_zoom((radius + deltaLength * epsilon) / radius)
        obj_sphere.set_data(sphere_geo1, sphere_geo0, **obj_sphere_kwargs)
        obj_sphere.move((0, 0, 0))
        # Tile copies of the template sphere on an (n_obj_x x n_obj_y) grid.
        for i in range(n_obj_x * n_obj_y):
            ix = i // n_obj_x
            iy = i % n_obj_x
            obj2 = obj_sphere.copy()
            obj2.set_name('sphereObj_%d_%d' % (ix, iy))
            move_dist = np.array([ix, iy, 0]) * move_delta
            obj2.move(move_dist)
            if random_velocity:
                sphere_velocity = np.random.sample(6) * u
                obj2.get_u_geo().set_rigid_velocity(sphere_velocity)
            problem.add_obj(obj2)
        problem.print_info()
        problem.create_matrix()
        residualNorm = problem.solve()
        fp = problem.get_force_petsc()
        if getConvergenceHistory:
            convergenceHistory = problem.get_convergenceHistory()
        if pickProblem:
            problem.pickmyself(fileHandle)
    else:
        with open(fileHandle + '_pick.bin', 'rb') as input:
            unpick = pickle.Unpickler(input)
            problem = unpick.load()
            problem.unpick_myself()
        residualNorm = problem.get_residualNorm()
        obj_sphere = problem.get_obj_list()[0]
        PETSc.Sys.Print('---->>>unpick the problem from file %s.pickle' % (fileHandle))
    sphere_err = 0
    # sphere_err = save_vtk(problem, **main_kwargs)
    # NOTE(review): obj2 is only defined in the non-restart branch, so this
    # line raises NameError when -restart is used — confirm intended object.
    force_sphere = obj2.get_total_force()
    PETSc.Sys.Print('---->>>Resultant is', force_sphere / 6 / np.pi / radius / u)
    return problem, sphere_err
# @profile
def two_step_main_fun(**main_kwargs):
    """Two-step variant: solve a single sphere, then a zoomed copy.

    NOTE(review): this function references ``d0`` and ``epsilon`` which are
    never defined in its scope (they exist only in sibling functions), so
    as written it raises NameError past the first solve — presumably stale
    code awaiting a refactor; confirm before using.
    Returns (problem, sphere_err, residualNorm).
    """
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    restart = problem_kwargs['restart']
    fileHandle = problem_kwargs['fileHandle']
    radius = problem_kwargs['radius']
    deltaLength = problem_kwargs['deltaLength']
    u = problem_kwargs['u']
    matrix_method = problem_kwargs['matrix_method']
    if not restart:
        n = int(16 * radius * radius / deltaLength / deltaLength)
        sphere_geo0 = sphere_geo()  # force geo
        sphere_geo0.create_n(n, radius)
        sphere_geo0.set_rigid_velocity([u, 0, 0, 0, 0, 0])
        print_case_info(**problem_kwargs)
        problem = problem_dic[matrix_method](**problem_kwargs)
        problem.pickmyself(
            fileHandle)  # not save anything really, just check if the path is correct, to avoid this error after long time calculation.
        obj_sphere = obj_dic[matrix_method]()
        obj_sphere_kwargs = {'name': 'sphereObj'}
        obj_sphere.set_data(sphere_geo0, sphere_geo0, **obj_sphere_kwargs)
        problem.add_obj(obj_sphere)
        problem.print_info()
        problem.create_matrix()
        residualNorm = problem.solve()
        # problem.pickmyself(fileHandle)
    else:
        with open(fileHandle + '_pick.bin', 'rb') as input:
            unpick = pickle.Unpickler(input)
            problem = unpick.load()
            problem.unpick_myself()
        residualNorm = problem.get_residualNorm()
        obj_sphere = problem.get_obj_list()[-1]
        PETSc.Sys.Print('---->>>unpick the problem from file %s.pickle' % (fileHandle))
    sphere_err = 0
    # sphere_err = save_vtk(problem, **main_kwargs)
    factor = 10
    obj_sphere1 = obj_sphere.copy()
    obj_sphere1.zoom(factor)
    ref_slt = sphere_slt(problem)  # NOTE(review): ref_slt is unused below
    # NOTE(review): ``d0`` is undefined here (see docstring).
    problem.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), obj_sphere1)
    sphere_geo_check = sphere_geo()
    sphere_geo_check.create_n(2000, radius)
    sphere_geo_check.set_rigid_velocity([u, 0, 0, 0, 0, 0])
    theta = np.pi / 2
    sphere_geo_check.node_rotation(norm=np.array([0, 1, 0]), theta=theta)
    sphere_check = sf.StokesFlowObj()
    sphere_check.set_data(sphere_geo_check, sphere_geo_check)
    sphere_err0 = problem.vtk_check('%s_Check_%f' % (fileHandle, (radius)), sphere_check)[0]
    t0 = time()
    # NOTE(review): ``epsilon`` and ``d0`` are undefined here (see docstring).
    problem_kwargs['delta'] = deltaLength * epsilon * d0
    problem_kwargs['name'] = 'spherePrb1'
    problem1 = problem_dic[matrix_method](**problem_kwargs)
    problem1.add_obj(obj_sphere1)
    problem1.create_matrix()
    t1 = time()
    PETSc.Sys.Print('%s: create problem use: %fs' % (str(problem), (t1 - t0)))
    residualNorm1 = problem1.solve()
    sphere_err1 = problem1.vtk_check('%s_Check_%f' % (fileHandle, (radius * d0)), sphere_check)
    force_sphere = obj_sphere.get_force_x()
    PETSc.Sys.Print('sphere_err0=%f, sphere_err1=%f' % (sphere_err0, sphere_err1))
    PETSc.Sys.Print('---->>>Resultant at x axis is %f' % (np.sum(force_sphere)))
    return problem, sphere_err, residualNorm
def tp_rs_wrapper():
    """Parameter sweep of the two-parameter regularized-Stokeslet method.

    Sweeps deltaLength x epsilon x order N, runs main_fun() for each
    combination and saves errors/residuals to sphere_err.mat (rank 0).

    Bug fix: main_fun() returns ``(problem, sphere_err)`` — two values —
    but the original unpacked three; the residual norm is now obtained
    from the returned problem instead.
    """
    # r_factor = np.array((1, 1))
    # deltaLength = (0.5, 0.4)
    # epsilon = (0.1, 0.2)
    # N = np.array((1, 2))
    r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    deltaLength = 0.05 ** np.arange(0.25, 1.05, 0.1)
    epsilon = 0.1 ** np.arange(-1, 1.2, 0.2)
    N = np.array((1, 2, 10, 20))
    deltaLength, epsilon, N = np.meshgrid(deltaLength, epsilon, N)
    deltaLength = deltaLength.flatten()
    epsilon = epsilon.flatten()
    N = N.flatten()
    sphere_err = np.zeros((epsilon.size, r_factor.size))
    residualNorm = epsilon.copy()
    main_kwargs = {'r_factor': r_factor}
    OptDB = PETSc.Options()
    OptDB.setValue('sm', 'tp_rs')
    for i0 in range(epsilon.size):
        d = deltaLength[i0]
        e = epsilon[i0]
        n = N[i0]
        fileHandle = 'sphere_%05d_%6.4f_%4.2f_%d' % (i0, d, e, n)
        OptDB.setValue('d', d)
        OptDB.setValue('e', e)
        OptDB.setValue('tp_n', int(n))
        OptDB.setValue('f', fileHandle)
        problem, sphere_err[i0, :] = main_fun(**main_kwargs)
        residualNorm[i0] = problem.get_residualNorm()
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('sphere_err.mat',
                {'deltaLength':  deltaLength,
                 'epsilon':      epsilon,
                 'N':            N,
                 'sphere_err':   sphere_err,
                 'residualNorm': residualNorm,
                 'r_factor':     r_factor},
                oned_as='column')
def lg_rs_wrapper():
    """
    to determine best combination of m and n for this method.

    Sweeps deltaLength x epsilon x (m, k) Legendre parameter pairs chosen
    by the -mk_case option, runs main_fun() for each combination and saves
    errors/residuals to sphere_err.mat (rank 0).

    Bug fix: main_fun() returns ``(problem, sphere_err)`` — two values —
    but the original unpacked three; the residual norm is now obtained
    from the returned problem instead.

    :return:
    """
    # r_factor = np.array((1, 1))
    # deltaLength = (0.5, 0.4)
    # epsilon = (0.1, 0.2)
    OptDB = PETSc.Options()
    r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    deltaLength = 0.05 ** np.arange(0.25, 1.05, 0.1)
    epsilon = 0.1 ** np.arange(-1, 1.2, 0.2)
    mk_case = OptDB.getInt('mk_case', 0)
    mk_banks = {
        0:  np.array((2, 1)),
        1:  np.array(((2, 0), (2, 1), (2, 2), (2, 3), (2, 4))),
        2:  np.array(((2, 1), (3, 1), (4, 1), (5, 1))),
        3:  np.array(((2, 2), (3, 2), (4, 2), (5, 2))),
        10: np.array(((2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                      (3, 0), (3, 1), (3, 2), (3, 3),
                      (4, 0), (4, 1), (4, 2), (4, 3),
                      (5, 0), (5, 1), (5, 2)))
    }
    mk_bank = mk_banks[mk_case].reshape((-1, 2))
    deltaLength, epsilon, mk_index = np.meshgrid(deltaLength, epsilon, range(mk_bank.shape[0]))
    deltaLength = deltaLength.flatten()
    epsilon = epsilon.flatten()
    mk_index = mk_index.flatten()
    sphere_err = np.zeros((epsilon.size, r_factor.size))
    residualNorm = epsilon.copy()
    main_kwargs = {'r_factor': r_factor}
    OptDB.setValue('sm', 'lg_rs')
    for i0 in range(epsilon.size):
        d = deltaLength[i0]
        e = epsilon[i0]
        m = mk_bank[mk_index[i0], 0]
        k = mk_bank[mk_index[i0], 1]
        fileHandle = 'sphere_%05d_%6.4f_%4.2f_m=%d,k=%d' % (i0, d, e, m, k)
        OptDB.setValue('d', d)
        OptDB.setValue('e', e)
        OptDB.setValue('legendre_m', int(m))
        OptDB.setValue('legendre_k', int(k))
        OptDB.setValue('f', fileHandle)
        problem, sphere_err[i0, :] = main_fun(**main_kwargs)
        residualNorm[i0] = problem.get_residualNorm()
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('sphere_err.mat',
                {'deltaLength':  deltaLength,
                 'epsilon':      epsilon,
                 'mk_bank':      mk_bank,
                 'mk_index':     mk_index,
                 'sphere_err':   sphere_err,
                 'residualNorm': residualNorm,
                 'r_factor':     r_factor},
                oned_as='column')
def percondition_wrapper():
    """
    Multiple spheres with random velocities, to determine whether the
    preconditioning method works.  Convergence histories and per-run errors
    are written to 'sphere_err.mat' on MPI rank 0.
    :return: None
    """
    OptDB = PETSc.Options()
    OptDB.setValue('r', 1)
    OptDB.setValue('d', 0.2)
    OptDB.setValue('e', 0.25)
    OptDB.setValue('f', 'sphere')
    OptDB.setValue('sm', 'lg_rs')
    OptDB.setValue('random_velocity', True)
    OptDB.setValue('getConvergenceHistory', True)
    OptDB.setValue('ksp_rtol', 1e-8)
    n_max = OptDB.getInt('n_max', 2)
    # FIX: np.object was a deprecated alias (removed in NumPy >= 1.24); the
    # builtin `object` is the documented equivalent for object arrays.
    sphere_err = np.zeros((n_max,), dtype=object)
    convergenceHistory = np.zeros((n_max,), dtype=object)
    for n in range(0, n_max):
        # Re-run the solve with an increasing number of spheres.
        OptDB.setValue('n', n + 1)
        problem, sphere_err[n] = main_fun()
        convergenceHistory[n] = problem.get_convergenceHistory()
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        # Only one MPI rank writes the aggregated results.
        savemat('sphere_err.mat',
                {'n': np.arange(n_max),
                 'convergenceHistory': convergenceHistory,
                 'sphere_err': sphere_err},
                oned_as='column')
def two_step_wrapper():
    """Run the two-step solver once for a fixed (delta, epsilon, m, k) case."""
    opts = PETSc.Options()
    # r_factor = 3 ** (np.arange(0, 1.2, 0.2) ** 2)
    r_factor = np.ones(1)
    delta = 0.10573713
    eps = 3
    mk = np.array((3, 2))
    sphere_err = np.zeros(r_factor.size)
    opts.setValue('sm', 'lg_rs')
    file_handle = 'sphere_%6.4f_%4.2f_m=%d,k=%d' % (delta, eps, mk[0], mk[1])
    # Publish the run parameters through the PETSc options database.
    opts.setValue('d', delta)
    opts.setValue('e', eps)
    opts.setValue('legendre_m', int(mk[0]))
    opts.setValue('legendre_k', int(mk[1]))
    opts.setValue('f', file_handle)
    problem, sphere_err[:], residual_norm = two_step_main_fun(r_factor=r_factor)
if __name__ == '__main__':
    # Entry point: run a single solve by default.  The parameter-sweep
    # wrappers defined above can be enabled instead by uncommenting them.
    # lg_rs_wrapper()
    # tp_rs_wrapper()
    # percondition_wrapper()
    main_fun()
    pass
    # OptDB.setValue('sm', 'sf')
    # m_sf = main_fun()
    # delta_m = np.abs(m_rs - m_sf)
    # # view_matrix(np.log10(delta_m), 'rs_m - sf_m')
    # percentage = delta_m / (np.maximum(np.abs(m_rs), np.abs(m_sf)) + 1e-100)
    #
    # view_args = {'vmin': -10,
    #              'vmax': 0,
    #              'title': 'log10_abs_rs',
    #              'cmap': 'gray'}
    # view_matrix(np.log10(np.abs(m_rs) + 1e-100), **view_args)
    #
    # view_args = {'vmin': -10,
    #              'vmax': 0,
    #              'title': 'log10_abs_sf',
    #              'cmap': 'gray'}
    # view_matrix(np.log10(np.abs(m_sf) + 1e-100), **view_args)
    #
    # view_args = {'vmin': 0,
    #              'vmax': 1,
    #              'title': 'percentage',
    #              'cmap': 'gray'}
    # view_matrix(percentage, **view_args)
    #
    # view_args = {'vmin': 0,
    #              'vmax': -10,
    #              'title': 'log10_percentage',
    #              'cmap': 'gray'}
    # view_matrix(np.log10(percentage + 1e-100), **view_args)
| pcmagic/stokes_flow | sphere/sphere_rs.py | Python | mit | 20,947 |
# regression tree
# input is a dataframe of features
# the corresponding y value(called labels here) is the scores for each document
import pandas as pd
import numpy as np
from multiprocessing import Pool
from itertools import repeat
import scipy
import scipy.optimize
node_id = 0  # module-level counter used to assign unique ids to leaf nodes
def get_splitting_points(args):
    """Return candidate split thresholds for one feature column.

    args: tuple (attribute, col) where `attribute` is the list of values of
    column index `col`.  Candidates are the midpoints between consecutive
    distinct values of the sorted column.  Returns (possible_split, col) so
    results can be matched back to their column under Pool.map.
    """
    attribute, col = args
    # FIX: sort a copy instead of sorting in place, so the caller's list is
    # not mutated as a side effect.
    ordered = sorted(attribute)
    possible_split = []
    for i in range(len(ordered) - 1):
        if ordered[i] != ordered[i + 1]:
            possible_split.append(np.mean((ordered[i], ordered[i + 1])))
    return possible_split, col
# create a dictionary, key is the attribute number, value is whole list of possible splits for that column
def find_best_split_parallel(args):
    """Evaluate every candidate threshold for one feature column.

    args: tuple ((key, possible_split), data, label) as produced by
    find_best_split.  Returns (best_ls, best_split, best_children) where
    best_split is (column, threshold); best_split/best_children remain None
    when there is no candidate at all.
    """
    # FIX: float('inf') sentinel instead of the magic 1000000, so splits
    # whose weighted least-square cost exceeds 1e6 are no longer discarded.
    best_ls = float('inf')
    best_split = None
    best_children = None
    split_point, data, label = args
    key, possible_split = split_point
    for split in possible_split:
        children = split_children(data, label, key, split)
        # Weighted average of the left and right least-square costs.
        ls = len(children[1]) * least_square(children[1]) / len(label) + \
             len(children[3]) * least_square(children[3]) / len(label)
        if ls < best_ls:
            best_ls = ls
            best_split = (key, split)
            best_children = children
    return best_ls, best_split, best_children
def find_best_split(data, label, split_points):
    """Pick the best (column, threshold) split over all candidate splits.

    split_points: dict mapping column index -> list of candidate thresholds.
    Candidates are scored in parallel; returns (best_split, best_children),
    both None when no candidate exists.
    """
    # FIX: float('inf') sentinel instead of the magic 1000000, so valid
    # splits with very large cost are no longer rejected.
    best_ls = float('inf')
    best_split = None
    best_children = None
    pool = Pool()
    for ls, split, children in pool.map(find_best_split_parallel, zip(split_points.items(), repeat(data), repeat(label))):
        if ls < best_ls:
            best_ls = ls
            best_split = split
            best_children = children
    pool.close()
    return best_split, best_children  # ((attribute, value), four-way split)
def split_children(data, label, key, split):
    """Partition rows of `data` (and their labels) on column `key` vs `split`.

    Rows with value strictly below the threshold go left.
    Returns (left_data, left_label, right_data, right_label).
    """
    # Pull the column out once instead of re-indexing the DataFrame per row;
    # `range` (not Py2-only `xrange`) keeps this Python 2/3 portable.
    column = data.iloc[:, key].tolist()
    left_index = [i for i, value in enumerate(column) if value < split]
    right_index = [i for i, value in enumerate(column) if value >= split]
    left_data = data.iloc[left_index, :]
    right_data = data.iloc[right_index, :]
    left_label = [label[i] for i in left_index]
    right_label = [label[i] for i in right_index]
    return left_data, left_label, right_data, right_label
def least_square(label):
    """Score a label partition: (sum of labels)^2 / number of distinct labels.

    Returns 0 for an empty partition.
    """
    if not len(label):
        return 0
    total = np.sum(label)
    return total ** 2 / len(set(label))
def create_leaf(label):
    """Build a leaf node predicting the (rounded) mean of `label`.

    Increments the module-level `node_id` counter so every leaf gets a
    unique 'index'.
    """
    global node_id
    node_id += 1
    # FIX: the key was misspelled 'splittng_feature'; the rest of this
    # module (create_tree, make_prediction) uses 'splitting_feature'.
    leaf = {'splitting_feature': None,
            'left': None,
            'right': None,
            'is_leaf': True,
            'index': node_id}
    leaf['value'] = round(np.mean(label), 3)
    return leaf
def find_splits_parallel(args):
    """Minimise error_function over the value range of a single column.

    args: (var_space, label, col).  Returns scipy's fminbound full_output
    tuple (best_split, error, ierr, numfunc).
    """
    var_space, label, col = args
    lower = min(var_space)
    upper = max(var_space)
    return scipy.optimize.fminbound(error_function, lower, upper,
                                    args=(col, var_space, label),
                                    full_output=1)
def create_tree(data, all_pos_split, label, max_depth, ideal_ls, current_depth = 0):
    """Recursively grow a regression tree.

    data: DataFrame of features; all_pos_split: dict column -> candidate
    thresholds; label: list of targets; max_depth / ideal_ls: stopping
    criteria (depth limit, "good enough" least-square cost).
    Returns a nested dict tree (see create_leaf for the leaf layout).
    """
    remaining_features = all_pos_split
    # Stopping conditions: nothing left to split on, or depth exhausted.
    if sum([len(v) != 0 for v in remaining_features.values()]) == 0:
        # If there are no remaining features to consider, make current node a leaf node
        return create_leaf(label)
    elif current_depth > max_depth:
        return create_leaf(label)
    min_error = None
    split_var = None
    min_split = None
    # `range` (not Py2-only `xrange`) keeps this Python 2/3 portable.
    var_spaces = [data.iloc[:, col].tolist() for col in range(data.shape[1])]
    cols = [col for col in range(data.shape[1])]
    pool = Pool()
    results = pool.map(find_splits_parallel, zip(var_spaces, repeat(label), cols))
    pool.close()
    # BUG FIX: the original loop read `col` leaked from the comprehension
    # above, so split_var was always the LAST column regardless of which
    # column produced the minimal error.  pool.map preserves input order,
    # so pair each result with its column explicitly.  Also compare with
    # `is None` so a legitimate 0.0 error is not treated as "unset".
    for col, (split, error, ierr, numf) in zip(cols, results):
        if min_error is None or error < min_error:
            min_error = error
            split_var = col
            min_split = split
    splitting_feature = (split_var, min_split)
    children = split_children(data, label, split_var, min_split)
    left_data, left_label, right_data, right_label = children
    if len(left_label) == 0 or len(right_label) == 0:
        # Degenerate split: everything fell on one side.
        return create_leaf(label)
    left_least_square = least_square(left_label)
    # Create a leaf node if the split is "perfect"
    if left_least_square < ideal_ls:
        return create_leaf(left_label)
    if least_square(right_label) < ideal_ls:
        return create_leaf(right_label)
    # recurse on children
    left_tree = create_tree(left_data, remaining_features, left_label, max_depth, ideal_ls, current_depth + 1)
    right_tree = create_tree(right_data, remaining_features, right_label, max_depth, ideal_ls, current_depth + 1)
    return {'is_leaf': False,
            'value': None,
            'splitting_feature': splitting_feature,
            'left': left_tree,
            'right': right_tree,
            'index': None}
def error_function(split_point, split_var, data, label):
    """Objective for fminbound: summed least_square cost of the two label
    partitions induced by thresholding `data` at `split_point`.

    `split_var` is unused here; it is kept so the signature matches the
    args tuple passed by find_splits_parallel.
    """
    data1 = []
    data2 = []
    # `range` (not Py2-only `xrange`) keeps this Python 2/3 portable.
    for i in range(len(data)):
        temp_dat = data[i]
        if temp_dat <= split_point:
            data1.append(label[i])
        else:
            data2.append(label[i])
    return least_square(data1) + least_square(data2)
def make_prediction(tree, x, annotate = False):
    """Walk the tree for feature row `x` and return the leaf value.

    annotate=True prints the decision path.  The parenthesized single-arg
    print form works under both Python 2 and 3 (the original Py2-only
    print statements broke under Python 3).
    """
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['value'])
        return tree['value']
    else:
        # the splitting value of x.
        split_feature_value = x[tree['splitting_feature'][0]]
        if annotate:
            print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
        if split_feature_value < tree['splitting_feature'][1]:
            return make_prediction(tree['left'], x, annotate)
        else:
            return make_prediction(tree['right'], x, annotate)
class RegressionTree:
    """Least-squares regression tree over a pandas DataFrame of features.

    training_data: DataFrame of features; labels: per-row target scores.
    max_depth / ideal_ls: stopping criteria forwarded to create_tree.
    """
    def __init__(self, training_data, labels, max_depth=5, ideal_ls=100):
        self.training_data = training_data
        self.labels = labels
        self.max_depth = max_depth
        self.ideal_ls = ideal_ls
        self.tree = None  # built lazily by fit()
    def fit(self):
        """Collect candidate splits per column (in parallel) and grow the tree."""
        global node_id
        node_id = 0
        all_pos_split = {}
        pool = Pool()
        # `range` (not Py2-only `xrange`) keeps this Python 2/3 portable.
        splitting_data = [self.training_data.iloc[:, col].tolist() for col in range(self.training_data.shape[1])]
        cols = [col for col in range(self.training_data.shape[1])]
        for dat, col in pool.map(get_splitting_points, zip(splitting_data, cols)):
            all_pos_split[col] = dat
        pool.close()
        self.tree = create_tree(self.training_data, all_pos_split, self.labels, self.max_depth, self.ideal_ls)
    def predict(self, test):
        """Return a numpy array with one prediction per feature row in `test`."""
        prediction = np.array([make_prediction(self.tree, x) for x in test])
        return prediction
if __name__ == '__main__':
    # Example driver (Python 2 syntax): expects "mlr06.xls" next to this
    # script, with target column 'X7' and the remaining columns as features.
    #read in data, label
    data = pd.read_excel("mlr06.xls")
    # NOTE(review): the first test row has 6 values but the second has 7 --
    # confirm the intended feature count.
    test = [[478, 184, 40, 74, 11, 31], [1000,10000,10000,10000,10000,1000,100000]]
    label = data['X7']
    del data['X7']
    model = RegressionTree(data, label)
    model.fit()
    print model.predict(test)
| lezzago/LambdaMart | RegressionTree.py | Python | mit | 6,591 |
__version__ = "9.1.4"  # package version string
| oslab-fr/lesspass | cli/lesspass/version.py | Python | mit | 22 |
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `tickvals` data-array property of `mesh3d.colorbar`."""
    def __init__(self, plotly_name="tickvals", parent_name="mesh3d.colorbar", **kwargs):
        # Defaults are popped from kwargs so callers can override them
        # without passing duplicate keyword arguments to the base class.
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/mesh3d/colorbar/_tickvals.py | Python | mit | 460 |
from django.contrib import admin
from friends.apps.phonebook.models import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Django admin configuration for phonebook Contact entries."""
    # Columns shown on the change-list page.
    list_display = ('id', 'first_name', 'last_name', 'phone_number')
    # Sidebar filter plus free-text search over name fields.
    list_filter = ('last_name', )
    search_fields = ['first_name', 'last_name']
| alexdzul/friends | friends/apps/phonebook/admin.py | Python | mit | 299 |
# -*- coding: utf-8 -*-
import logging
import argparse
from .imdb import find_movies
logger = logging.getLogger('mrot')
def parse_args(argv=None):
    """Parse command-line arguments for the mrot CLI.

    :param argv: optional list of argument strings; defaults to
        sys.argv[1:] (argparse's behaviour when given None).  Accepting an
        explicit list is backward compatible and makes the parser testable.
    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(prog='mrot', description='Show movie ratings over time.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('movie_name', help='the name of the movie')

    # Optional arguments
    parser.add_argument("-c", "--concurrency", type=int, default=2,
                        help="maximum number of concurrent requests to the wayback machine")
    parser.add_argument("-d", "--delta", type=int, default=365, help="minimum number of days between two ratings")
    parser.add_argument("-q", "--quiet", action="store_true", help="don't print progress")

    args = parser.parse_args(argv)
    return args
def main():
    """CLI entry point: parse arguments, configure logging, plot ratings."""
    args = parse_args()
    logging.basicConfig(level=(logging.WARN if args.quiet else logging.INFO))
    # Cap at 10 concurrent requests to the wayback machine; the -c flag is
    # only honoured up to this limit.  (An earlier comment said 20, but the
    # code has always capped at 10.)
    concurrency = min(args.concurrency, 10)
    # Find the movies corresponding to the given movie name
    imdb_movies = find_movies(args.movie_name)
    if len(imdb_movies) > 0:
        # Show rating for the first movie matching the given name
        imdb_movie = imdb_movies[0]
        imdb_movie.plot_ratings(concurrency, args.delta)
    else:
        logger.info('Movie not found')
| abrenaut/mrot | mrot/cli.py | Python | mit | 1,423 |
import bisect
from mahjong import bots, patterns, scoring
from mahjong.types import Tile
def can_win(context, player_idx=None, incoming_tile=None):
    '''
    Return if a player can win.
    Unlike Hand.can_win(), this function does extra checking for special
    winning patterns and winning restrictions (filters) in GameSettings.

    player_idx defaults to the current player; incoming_tile defaults to
    the hand's last tile or the last discarded tile (see _get_incoming_tile).
    '''
    player_idx = _get_player_index(context, player_idx)
    incoming_tile = _get_incoming_tile(context, player_idx, incoming_tile)
    player = context.players[player_idx]
    hand = player.hand
    # if player declared ready, there should be waiting tiles cached in player.extra already
    # so no need to call costly Hand.can_win()
    if player.extra.get('declared_ready'):
        waiting_tiles = player.extra.get('waiting_tiles')
        if waiting_tiles and incoming_tile in waiting_tiles:
            return True
    # check if player is in water penalty
    if context.settings.water:
        waiting_tiles = player.extra.get('water')
        if waiting_tiles and incoming_tile in waiting_tiles:
            return False
    # general winning pattern
    if hand.can_win(incoming_tile):
        # Every configured filter pattern must match for a general win;
        # the loop short-circuits on the first failing filter.
        all_matched = True
        for pattern_name in context.settings.patterns_win_filter:
            all_matched = patterns.match(pattern_name, context, player_idx, incoming_tile)
            if not all_matched:
                break
        if all_matched:
            return True
    # special winning patterns
    for pattern_name in context.settings.patterns_win:
        if patterns.match(pattern_name, context, player_idx, incoming_tile):
            return True
    return False
def can_flower_win(context, player_idx=None, incoming_tile=None):
    '''
    Return whether the player can win with a flower tile.
    Equivalent to algo.can_win() restricted to the flower-based special
    patterns, which makes it much cheaper.
    '''
    player_idx = _get_player_index(context, player_idx)
    incoming_tile = _get_incoming_tile(context, player_idx, incoming_tile)
    # Only flower tiles can trigger the flower-winning patterns.
    if not incoming_tile.is_general_flower():
        return False
    enabled = context.settings.patterns_win
    for name in ('seven-flowers', 'eight-flowers'):
        if name in enabled and patterns.match(name, context, player_idx, incoming_tile):
            return True
    return False
def waiting_tiles(context, player_idx=None):
    '''
    Return a list of tiles that makes a player a winning hand.
    Unlike Hand.waiting_tiles(), this function covers special winning patterns
    and winning restrictions (filters) in GameSettings. This function is quite
    time-expensive. Use it with caution.
    '''
    if player_idx is None:
        player_idx = context.cur_player_idx
    player = context.players[player_idx]
    # A ready-declared player already has the waiting set cached.
    if player.extra.get('declared_ready'):
        waiting_tiles = player.extra.get('waiting_tiles')
        if waiting_tiles is not None:
            return waiting_tiles
    tiles = []
    # Brute force: test every tile in the game (Python 2 dict.itervalues()).
    for tile in Tile.ALL.itervalues():
        if can_win(context, player_idx, tile):
            bisect.insort(tiles, tile)  # keep the result sorted as we insert
    return tiles
def ready(context, player_idx=None):
    '''
    Is a player one tile away from winning (a "ready" hand)?
    Unlike Hand.ready(), this function covers special winning patterns and
    winning restrictions (filters) in GameSettings. This function is quite
    time-expensive. Use it with caution.
    '''
    # Ready means at least one tile in the game would complete the hand.
    return any(can_win(context, player_idx, tile)
               for tile in Tile.ALL.itervalues())
def select_melders(viable_decisions, player_decisions, base_offset=0):
    '''
    Given a table of viable decisions that all players can make, this function
    selects top-priority players and their decisions.
    Example:
        viable_decisions = [
            None,                     # player 0 can't do anything
            ['chow', 'skip'],         # player 1 can chow
            ['win', 'pong', 'skip'],  # player 2 can win and pong
            ['win', 'skip']           # player 3 can win
        ]
        player_decisions = [
            None,
            'chow',
            'win',
            'win'
        ]
        melders = select_melders(viable_decisions, player_decisions)
        assert melders == [(2, 'win'), (3, 'win')]
    Notice that the input arrays don't have to be length of 4. As long as they
    have the same length, this function will work.

    Returns a list of (player_idx, decision) tuples; [(player_idx, None)]
    when some player with a pending option hasn't decided yet (caller must
    wait); or None when everybody skips / nobody can act.
    '''
    result = []
    num_players = len(player_decisions)
    # Wins have top priority, and multiple winners could be allowed,
    # so we need to add all winners into result.
    # (`range`, not Py2-only `xrange`, keeps this Python 2/3 portable.)
    for i in range(0, num_players):
        player_idx = (base_offset + i) % num_players
        player_viable = viable_decisions[player_idx]
        if player_viable and 'win' in player_viable:
            player_decision = player_decisions[player_idx]
            if player_decision == 'win':
                result.append((player_idx, 'win'))
            elif player_decision not in player_viable:
                # player i hasn't made a decision;
                # this returned value indicates we need to wait for him
                return [(player_idx, None)]
    # if anyone wants to win, no need to check kong, pong, chow
    if result:
        return result
    # there can be only one kong, pong, or chow; check in priority order
    for decision in ('kong', 'pong', 'chow'):
        for i in range(0, num_players):
            player_idx = (base_offset + i) % num_players
            player_viable = viable_decisions[player_idx]
            if player_viable and decision in player_viable:
                player_decision = player_decisions[player_idx]
                if player_decision not in player_viable:
                    # player i hasn't made a decision;
                    # this returned value indicates we need to wait for him
                    return [(player_idx, None)]
                elif player_decision == decision:
                    # player i made a decision
                    return [(player_idx, decision)]
    # everybody skips or nobody can do anything
    return None
def score(context, match_results):
    # Delegate to the scoring backend configured in the game settings,
    # applying the score patterns declared there.
    return scoring.score(context.settings.scorer, match_results,
                         context.settings.patterns_score)
def can_4_kong_win(context, player_idx, tile):
    '''
    'Four-kongs' is a special scenario: the UI must offer 'win' rather
    than 'kong'.  The game logic uses this function to decide whether
    'kong' should be listed among the viable decisions.
    '''
    # The pattern must be enabled in the settings AND actually match.
    if 'four-kongs' not in context.settings.patterns_win:
        return False
    if patterns.match('four-kongs', context, player_idx, tile):
        return True
    return False
def get_decision(context, player_idx=None, viable_decisions=None):
    '''
    Get the player's decision. This decision can come from the human player
    (Player.decision) or a bot. Return None if the player hasn't made the
    decision or made an invalid decision.
    '''
    if player_idx is None:
        player_idx = context.cur_player_idx
    player = context.players[player_idx]
    # Fall back to the viable decisions cached on the player when the
    # caller doesn't supply them explicitly.
    viable_decisions = viable_decisions or player.extra.get('viable_decisions')
    if viable_decisions:
        if player.extra.get('bot'):
            return bots.get().make_decision(context, player_idx, viable_decisions)
        # A ready-declared player acts automatically: discard the drawn
        # tile while discarding, otherwise win if possible, else skip.
        if player.extra.get('declared_ready'):
            if context.state == 'discarding':
                return player.hand.last_tile
            else:
                if 'win' in viable_decisions:
                    return 'win'
                return 'skip'
        if player.decision in viable_decisions:
            return player.decision
    return None
def _get_player_index(context, player_idx=None):
if player_idx is None:
player_idx = context.cur_player_idx
if player_idx is None:
raise ValueError('Must specify player_idx')
return player_idx
def _get_incoming_tile(context, player_idx, tile=None):
    '''
    Try to find the incoming tile with the following order:
    1. Parameter ``tile``
    2. ``Hand.last_tile``
    3. ``context.last_discarded()``

    Raises ValueError when none of the three sources yields a tile.
    '''
    player = context.players[player_idx]
    hand = player.hand
    if not tile:
        tile = hand.last_tile or context.last_discarded()
    if not tile:
        raise ValueError('Needs an incoming tile')
    return tile
def _copy_scores(scores):
result = {}
for name, match_result in scores.iteritems():
result[name] = match_result.clone()
return result
def _filter_scores(scores, attr_name, subtract_func):
    """Apply pattern-exclusion rules to a copy of `scores`.

    For each scored pattern, look up the related pattern names stored in
    the pattern's `attr_name` attribute; let `subtract_func` adjust the
    related result (it may mutate match_result2 in place), then drop any
    related result that is falsy — whether it started falsy or became
    falsy after subtraction.  Uses .items() (Py2/Py3) instead of the
    Py2-only .iteritems().
    """
    result = _copy_scores(scores)
    for name, match_result in scores.items():
        pattern = patterns.get(name)
        other_names = getattr(pattern, attr_name)
        if other_names:
            for other_name in other_names:
                match_result2 = result.get(other_name)
                if match_result2:
                    subtract_func(pattern, match_result, other_name, match_result2)
                if not match_result2:
                    result.pop(other_name, None)
    return result
| eliangcs/mahjong | mahjong/algo.py | Python | mit | 9,066 |
import os
import sys
import errno
import itertools
import logging
import stat
import threading
from fuse import FuseOSError, Operations
from . import exceptions, utils
from .keys import Key
from .logs import Log
from .views import View
logger = logging.getLogger('basefs.fs')
class ViewToErrno():
    """Context manager translating basefs view exceptions into FUSE errors.

    Domain exceptions raised inside the ``with`` block are re-raised as
    FuseOSError with the matching POSIX errno; any other exception (or
    none) propagates unchanged because __exit__ returns None.
    """
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc, exc_tb):
        if exc_type is exceptions.PermissionDenied:
            raise FuseOSError(errno.EACCES)
        if exc_type is exceptions.DoesNotExist:
            raise FuseOSError(errno.ENOENT)
        if exc_type is exceptions.Exists:
            raise FuseOSError(errno.EEXIST)
class FileSystem(Operations):
    """FUSE operations backed by a basefs ``View`` over an append-only log.

    Reads resolve paths through the view; writes are buffered per path in
    ``self.cache`` (flagged in ``self.dirty``) and committed to the view on
    release().  When a serf client is configured, every committed entry is
    also broadcast via send().
    """
    def __init__(self, view, serf=None, serf_agent=None, init_function=None):
        # view: basefs View used to resolve paths to log nodes.
        # serf / serf_agent: optional cluster transport; entries are sent
        #   through `serf`, and `serf_agent` is stopped on unmount.
        # init_function: optional callable invoked once the FS is mounted.
        self.view = view
        self.cache = {}   # path -> buffered content of open files
        self.dirty = {}   # path -> True when the buffer needs committing
        self.loaded = view.log.loaded  # log snapshot marker, see get_node()
        self.init_function = init_function
        self.serf = serf
        self.serf_agent = serf_agent
    def __call__(self, op, path, *args):
        # Dispatch a FUSE operation by name, logging the call and result.
        logger.debug('-> %s %s %s', op, path, repr(args))
        ret = '[Unhandled Exception]'
        try:
            ret = getattr(self, op)(path, *args)
            return ret
        except OSError as e:
            ret = str(e)
            raise
        finally:
            logger.debug('<- %s %s', op, repr(ret))
    def init(self, path):
        """ threads should start here, otherwise will not run when fuse is backgrounded """
        if self.init_function:
            self.init_function()
    def destroy(self, path):
        # Called on unmount: stop the serf agent if one was provided.
        super().destroy(path)
        if self.serf_agent:
            self.serf_agent.stop()
    def get_node(self, path):
        """Resolve *path* to a view node, rebuilding the view if the log changed.

        Raises FuseOSError(ENOENT) for deleted entries; view errors are
        mapped to errno codes by ViewToErrno.
        """
        # check if logfile has been modified
        if self.loaded != self.view.log.loaded:
            logger.debug('-> %s rebuild', path)
            self.view.build()
            self.loaded = self.view.log.loaded
        with ViewToErrno():
            node = self.view.get(path)
        if node.entry.action == node.entry.DELETE:
            raise FuseOSError(errno.ENOENT)
        return node
    def send(self, node):
        # Broadcast a committed entry to the cluster (no-op without serf).
        if self.serf:
            entry = node.entry
            logger.debug("Sending entry %s '%s'", entry.hash, entry.name)
            self.serf.send(node.entry)
    # def access(self, mode):
    #     return super(FileSystem, self).access(full_path, mode)
    #     full_path = self._full_path(path)
    #     if not os.access(full_path, mode):
    #         raise FuseOSError(errno.EACCES)
    # def chmod(self, path, mode):
    #     full_path = self._full_path(path)
    #     return os.chmod(full_path, mode)
    # def chown(self, path, uid, gid):
    #     full_path = self._full_path(path)
    #     return os.chown(full_path, uid, gid)
    def getattr(self, path, fh=None):
        """Return a stat-like dict for *path*.

        Files buffered in the cache report their buffered size with fixed
        0o640 permissions and the current time; otherwise timestamps come
        from the log entry, and write permission bits are granted only when
        a key is held for the path.
        """
        try:
            content = self.cache[path]
        except KeyError:
            node = self.get_node(path)
            has_perm = bool(self.view.get_key(path))
            if node.entry.action == node.entry.MKDIR:
                mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
            else:
                mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
            return {
                'st_atime': node.entry.timestamp,
                'st_ctime': node.entry.ctime,
                'st_gid': os.getgid(),
                'st_mode': mode,
                'st_mtime': node.entry.timestamp,
                'st_nlink': 1,
                'st_size': len(node.content),
                'st_uid': os.getuid(),
            }
        else:
            # Buffered (open/created) file: report current time, since no
            # log entry exists yet for the uncommitted content.
            import time
            return {
                'st_atime': time.time(),
                'st_ctime': time.time(),
                'st_gid': os.getgid(),
                'st_mode': stat.S_IFREG | 0o0640,
                'st_mtime': time.time(),
                'st_nlink': 1,
                'st_size': len(content),
                'st_uid': os.getuid(),
            }
        # full_path = self._full_path(path)
        # st = os.lstat(full_path)
        # return dict((key, getattr(st, key)) for key in ())
    def readdir(self, path, fh):
        """Yield '.', '..' and live children of *path* (entries whose latest
        action is not DELETE/GRANT/REVOKE)."""
        node = self.get_node(path)
        entry = node.entry
        dirs = ['.', '..']
        for d in itertools.chain(dirs, [child.entry.name for child in node.childs if child.entry.action not in (entry.DELETE, entry.GRANT, entry.REVOKE)]):
            yield d
    # def readlink(self, path):
    #     pathname = os.readlink(self._full_path(path))
    #     if pathname.startswith("/"):
    #         # Path name is absolute, sanitize it.
    #         return os.path.relpath(pathname, self.root)
    #     else:
    #         return pathname
    def mknod(self, path, mode, dev):
        # Device/special files are not supported by the log-backed FS.
        raise NotImplementedError
    def rmdir(self, path):
        # Record a delete entry for the directory and broadcast it.
        with ViewToErrno():
            node = self.view.delete(path)
        self.send(node)
    def mkdir(self, path, mode):
        # Record a mkdir entry and broadcast it; `mode` is ignored.
        with ViewToErrno():
            node = self.view.mkdir(path)
        self.send(node)
        return 0
    # def statfs(self, path):
    #     full_path = self._full_path(path)
    #     stv = os.statvfs(full_path)
    #     return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
    #         'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
    #         'f_frsize', 'f_namemax'))
    def unlink(self, path):
        # Record a delete entry for the file and broadcast it.
        with ViewToErrno():
            node = self.view.delete(path)
        self.send(node)
    #     return os.unlink(self._full_path(path))
    # def symlink(self, name, target):
    #     return os.symlink(name, self._full_path(target))
    def rename(self, old, new):
        # Renames are not supported by the log-backed FS.
        raise NotImplementedError
    # def link(self, target, name):
    #     return os.link(self._full_path(target), self._full_path(name))
    # def utimens(self, path, times=None):
    #     return os.utime(self._full_path(path), times)
    # # File methods
    # # ============
    def open(self, path, flags):
        """Load *path* into the write-back cache and return a handle.

        The handle is the entry hash interpreted as an integer.
        """
        node = self.get_node(path)
        id = int(node.entry.hash, 16)
        if path not in self.cache:
            self.cache[path] = node.content
            self.dirty[path] = False
        return id
    def create(self, path, mode, fi=None):
        # Start a new empty, dirty buffer; committed on release().
        # NOTE(review): the handle here is the builtin id() of the path
        # string, while open() uses the entry hash — confirm handles are
        # never compared across the two code paths.
        self.cache[path] = b''
        self.dirty[path] = True
        return id(path)
    def read(self, path, length, offset, fh):
        # Serve from the write-back cache when present, else from the view.
        try:
            content = self.cache[path]
        except KeyError:
            node = self.get_node(path)
            content = node.content
        return content[offset:offset+length]
    def write(self, path, buf, offset, fh):
        # TODO check write permissions
        try:
            content = self.cache[path]
        except KeyError:
            node = self.get_node(path)
            content = node.content
        size = len(buf)
        # Splice the written range into the buffer; mark dirty only when
        # the content actually changed.
        new_content = content[:offset] + buf + content[offset+size:]
        if content != new_content:
            self.dirty[path] = True
        self.cache[path] = new_content
        return size
    def truncate(self, path, length, fh=None):
        # Shrink the buffered content and mark it for commit.
        self.cache[path] = self.cache[path][:length]
        self.dirty[path] = True
    # def flush(self, path, fh):
    #     # TODO Filesystems shouldn't assume that flush will always be called after some writes, or that if will be called at all.
    #     content = self.cache.pop(path, None)
    #     dirty = self.dirty.pop(path, False)
    #     if content is not None and dirty:
    #         print('write')
    #         node = self.view.write(path, content)
    ##         self.send(node)
    def release(self, path, fh):
        """Drop the buffer for *path*; when dirty, commit it to the view
        and broadcast the resulting entry."""
        content = self.cache.pop(path, None)
        dirty = self.dirty.pop(path, False)
        if content is not None and dirty:
            # TODO raise permission denied should happen in write() create().... not here
            with ViewToErrno():
                node = self.view.write(path, content)
            self.send(node)
    # def fsync(self, path, fdatasync, fh):
    #     return self.flush(path, fh)
    #     return None
| glic3rinu/basefs | basefs/fs.py | Python | mit | 8,050 |
# -*- coding: utf-8 -*-
from PIL import Image
import unittest
from pynayzr.cropper import crop, ttv, ftv, sets
class TestCropBase(unittest.TestCase):
    """Tests for the generic CropBase container defaults."""
    def test_blank_base(self):
        # A fresh CropBase only stores its name; image and all crop boxes
        # start unset.
        base = crop.CropBase('base')
        self.assertEqual(base.name, 'base')
        self.assertEqual(base.image, None)
        self.assertEqual(base.title_box, None)
        self.assertEqual(base.subtitle_box, None)
        self.assertEqual(base.source_box, None)
class TestTTVCropper(unittest.TestCase):
    """TTV cropper: title/subtitle crops must yield PIL images."""
    def setUp(self):
        self.p = ttv.TTVCropper('tests/imgs/ttv/main_01.jpg')
    def test_crop_title(self):
        self.assertIsInstance(self.p.title(), Image.Image)
    def test_crop_subtitle(self):
        self.assertIsInstance(self.p.subtitle(), Image.Image)
class TestSETSCropper(unittest.TestCase):
    """SET cropper: title/subtitle/source crops must yield PIL images."""
    def setUp(self):
        self.p = sets.SETSCropper('tests/imgs/set/main_02.jpg')
    def test_crop_title(self):
        self.assertIsInstance(self.p.title(), Image.Image)
    def test_crop_subtitle(self):
        self.assertIsInstance(self.p.subtitle(), Image.Image)
    def test_crop_source(self):
        self.assertIsInstance(self.p.source(), Image.Image)
class TestFTVCropper(unittest.TestCase):
    """FTV cropper: title/subtitle crops must yield PIL images."""
    def setUp(self):
        self.p = ftv.FTVCropper('tests/imgs/ftv/main_02.jpg')
    def test_crop_title(self):
        self.assertIsInstance(self.p.title(), Image.Image)
    def test_crop_subtitle(self):
        self.assertIsInstance(self.p.subtitle(), Image.Image)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| pynayzr/pynayzr | test/test_cropper.py | Python | mit | 1,540 |
import re, sys
import functools
import graphviz as gv
from graphviz import Source
# NOTE(review): bad_words is defined but not referenced in the code visible
# here -- presumably left over from an earlier filtering pass; confirm
# before deleting.
bad_words = [ 'jns', 'js', 'jnz', 'jz', 'jno', 'jo', 'jbe', 'jb', 'jle', 'jl', 'jae', 'ja', 'jne loc', 'je', 'jmp', 'jge', 'jg', 'SLICE_EXTRA', 'SLICE_ADDRESSING', '[BUG]', 'SLICE_VERIFICATION', 'syscall', '#PARAMS_LOG']
instrEdges = []
instrNodes = []
# Build the node list: one "address-mnemonic" string per slice line
# (columns 1 and 2 of each whitespace-split line).
with open('smallCleanedSlice.txt') as oldfile:
    for line in oldfile:
        tempLine = line.split()
        instrNodes.append(tempLine[1] + '-' + tempLine[2])
i=0
# Strip '#' markers from every node name in place.
for x in instrNodes:
    instrNodes[i] = x.replace("#", "")
    i += 1
instrNodesString = ''.join(instrNodes)
print('Done! Instruction Nodes List Size is : ') #+ instrNodesString
#print(instrNodes)
print(len(instrNodes))
#print(instrNodes[len(instrNodes)-1])
pattern = '\s+(\S+)\s'
# Second pass over the slice: strip the SLICE markers and keep only the
# operand text of each instruction (the "edges" of the eventual graph).
with open('smallCleanedSlice.txt') as oldfile:
    for line in oldfile:
        # NOTE(review): str.replace() is literal, not regex -- this first
        # call only replaces the exact text '#\S*', which is unlikely to
        # occur in the input; re.sub was probably intended.
        prepline = line.replace("#\S*", " r1 ")
        prepline = prepline.replace("[SLICE_INFO]", " r2 ")
        prepline = prepline.replace("[SLICE_INFO]", " r2 ")
        prepline = prepline.replace("[SLICE]", " r3 ")
        prepline = prepline.replace("\t", " \t ")
        prepline = prepline.rstrip("\t")
        prepline = re.sub(r'(\s)#\w+', r'\1', prepline)
        prepline = re.sub(r'.*SLICE', '', prepline)
        prepline = re.sub(r'(\s)SLICE\s+', r'\1', prepline)
        # Split on the sentinel tokens inserted above; field 1 (when it
        # exists) holds the operand text.
        splitList = re.split("r1 | r2 | \t | r3 ", prepline)
        if (len(splitList) >=2):
            tempEdge = splitList[1]
            tempEdge = tempEdge.lstrip()
            #print tempEdges
            #print len(splitList)
        else :
            tempEdge = splitList[0]
            #print ('hello: '+tempEdge)
        instrEdges.append(tempEdge)
        #str1 = ''.join(tempLine)
        #for line in str1:
dict1 ={}
j = 0
#give unique id number for each instruction based on its line number (starting at 0)
'''for x in instrNodes:
    instrNodes[j] = str(j)+ '-' +instrNodes[j]
    j+=1
'''
instrNodesString = ''.join(instrEdges)
print('Done! Instruction Edges List size is : ') #+ instrNodesString
#print(instrEdges)
#print(instrNodes)
print(len(instrEdges))
# Map each node ("address-mnemonic") to its operand string.  NOTE: dict
# keys collapse duplicate node names; only the last occurrence survives.
new_dict = {k: v for k, v in zip(instrNodes, instrEdges)}
#print(dict1)
#example dictionary entry is dict1['0-cmp': 'eax, 0xfffff001']
print('Done! Dict (LineNumber-Instruction: Edges) is : ')
#print((new_dict).keys())
#print((new_dict))
print("first node(instr): and its edges(operands): " + 'b7ff5c05-cmp: '+str(new_dict['b7ff5c05-cmp']))
#PRINT OUT THE TWO LISTS INTO TWO SEPERATE FILES
#y = ",".join(map(str, instrNodes))
#z = ",,".join(map(str, instrEdges))
#outputFile= open('nodesOut.txt', 'w')
#outputFile.write(y)
#outputFile2 = open('edgesOut.txt', 'w')
#outputFile2.write(z)
# Interactive lookup loop (Python 2: raw_input).  Note: a successful
# lookup `break`s out of the loop, so only ONE key can be queried per
# run; only an invalid key causes a re-prompt.
flagEnterKeys = 1
while (flagEnterKeys == 1):
    input_var = raw_input('Enter a key (b7ff5c05-cmp for the 1st instruction cmp in the slice): TYPE EXIT TO End.\n')
    if (input_var in new_dict):
        print("Operands for " + input_var + " are: " + str(new_dict[input_var]) + ".\n")
        break
    if ((input_var == "exit") or (input_var == ",exit,")):
        flagEnterKeys = 0;
        break
    else :
        print("ERROR! Please enter in a valid key for the instrNodes, instrEdges dictionary.")
##New Graphviz-dot code here
# Factory helpers: SVG-emitting Graph/Digraph constructors.
graph = functools.partial(gv.Graph, format='svg')
digraph = functools.partial(gv.Digraph, format='svg')
datG = digraph()
# Aliases consumed by add_nodes()/add_edges() below.
nodes = instrNodes
edges = instrEdges
#nodes = testNodes
#edges = testEdges
print(nodes)
print(edges)
def add_nodes(graph):
    """Add one graphviz node per instruction, labelled "node(operands)".

    Reads the module-level `nodes` list and `new_dict` mapping; returns
    the graph to allow chaining.
    """
    for n in nodes:
        graph.node(n, label = str(n) + '(' + str(new_dict[n]) + ')')
    return graph
def add_edges(graph):
    """Add every element of the module-level `edges` list as a graph edge.

    NOTE(review): the elements of `edges` are operand *strings* built by
    the parsing pass above, so `graph.edge(*e)` unpacks each string into
    individual characters rather than a (tail, head) pair -- confirm the
    intended element shape before relying on this.
    """
    for e in edges:
        graph.edge(*e)
    return graph
# Scratch state -- the names suggest tracking which instruction most
# recently defined each x86 status flag (OF/SF/ZF/AF/CF/PF), with ''
# meaning "not yet set"; not referenced in the code visible here.
cmpFlags = []
newestOF = ''
newestSF = ''
newestZF = ''
newestAF = ''
newestCF = ''
newestPF = ''
# default values 'R' means edge from root node in the 32-bit 4word registers
# Accumulator / Counter / Data / Base / Stack Pointer / Stack Base Pointer /
# Source / Destination index registers, each tracked as four words.
EAX = ['R', 'R', 'R', 'R']
ECX = ['R', 'R', 'R', 'R']
EDX = ['R', 'R', 'R', 'R']
EBX = ['R', 'R', 'R', 'R']
ESP = ['R', 'R', 'R', 'R']
EBP = ['R', 'R', 'R', 'R']
ESI = ['R', 'R', 'R', 'R']
EDI = ['R', 'R', 'R', 'R']  # FIX: EDI was initialized twice; the first binding was dead


def _set_words(register, start, words):
    # Shared helper: splice `words` into `register` in place starting at
    # `start`, so aliases of the module-level lists observe the update.
    register[start:start + len(words)] = words


# modify EAX register and its 16- and 8-bit views
def modifyEAX(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(EAX, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyAX(thirdWord, fourthWord):
    _set_words(EAX, 2, [thirdWord, fourthWord])
def modifyAH(thirdWord):
    _set_words(EAX, 2, [thirdWord])
def modifyAL(fourthWord):
    _set_words(EAX, 3, [fourthWord])
# modify ECX register and its 16- and 8-bit views
def modifyECX(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(ECX, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyCX(thirdWord, fourthWord):
    _set_words(ECX, 2, [thirdWord, fourthWord])
def modifyCH(thirdWord):
    _set_words(ECX, 2, [thirdWord])
def modifyCL(fourthWord):
    _set_words(ECX, 3, [fourthWord])
# modify EDX register and its 16- and 8-bit views
def modifyEDX(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(EDX, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyDX(thirdWord, fourthWord):
    _set_words(EDX, 2, [thirdWord, fourthWord])
def modifyDH(thirdWord):
    _set_words(EDX, 2, [thirdWord])
def modifyDL(fourthWord):
    _set_words(EDX, 3, [fourthWord])
# modify EBX register and its 16- and 8-bit views
def modifyEBX(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(EBX, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyBX(thirdWord, fourthWord):
    _set_words(EBX, 2, [thirdWord, fourthWord])
def modifyBH(thirdWord):
    _set_words(EBX, 2, [thirdWord])
def modifyBL(fourthWord):
    _set_words(EBX, 3, [fourthWord])
# modify ESP register and its 16-bit view
def modifyESP(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(ESP, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifySP(thirdWord, fourthWord):
    _set_words(ESP, 2, [thirdWord, fourthWord])
# modify EBP register and its 16-bit view
def modifyEBP(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(EBP, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyBP(thirdWord, fourthWord):
    _set_words(EBP, 2, [thirdWord, fourthWord])
# modify ESI register and its 16-bit view
def modifyESI(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(ESI, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifySI(thirdWord, fourthWord):
    _set_words(ESI, 2, [thirdWord, fourthWord])
# modify EDI register and its 16-bit view
def modifyEDI(firstWord, secondWord, thirdWord, fourthWord):
    _set_words(EDI, 0, [firstWord, secondWord, thirdWord, fourthWord])
def modifyDI(thirdWord, fourthWord):
    _set_words(EDI, 2, [thirdWord, fourthWord])
# Demo: a list slice produces a COPY, so rebinding `ax` below does not touch
# EAX — which is why the modify* helpers above assign through slices
# (EAX[2:4] = ...) instead of rebinding names.
ax = EAX[2:4]
print(EAX)
print(ax)
ax = ['changedax1', 'changedax2']
print(EAX)  # unchanged: the rebind above only replaced the local copy
print(ax)
# ---------------------------------------------------------------------------
# Build the data-flow graph: for every instruction, track which instruction
# last produced each register byte slot, and draw edges producer -> consumer.
#
# The original ~250-line copy-paste ladder of per-register if/elif chains is
# replaced by a single operand-name lookup table.  Fixes folded in:
#   * the 16-bit EBP/ESI views were matched as "ebp"/"esi" instead of
#     "bp"/"si" (copy-paste bug), so modifyBP/modifySI never ran;
#   * the cmp fallback edge referenced loop variables (k, ido) left over
#     from a previous iteration — a NameError on the first unmatched operand.
#
# NOTE(review): `datG`, `instrEdges`, `instrNodes`, `nodes`, `add_nodes`,
# `Source`, `statusFlags` and the newest*-flag variables are defined earlier
# in the file (outside this chunk) — verify they are initialised before this
# loop runs, since the flag bookkeeping below reads them unconditionally.

# Operand name -> (owning 32-bit register list, byte slots it covers).
_REG32 = {'eax': EAX, 'ecx': ECX, 'edx': EDX, 'ebx': EBX,
          'esp': ESP, 'ebp': EBP, 'esi': ESI, 'edi': EDI}
_SUBREG = {
    'eax': ('eax', slice(0, 4)), 'ax': ('eax', slice(2, 4)),
    'ah': ('eax', slice(2, 3)), 'al': ('eax', slice(3, 4)),
    'ecx': ('ecx', slice(0, 4)), 'cx': ('ecx', slice(2, 4)),
    'ch': ('ecx', slice(2, 3)), 'cl': ('ecx', slice(3, 4)),
    'edx': ('edx', slice(0, 4)), 'dx': ('edx', slice(2, 4)),
    'dh': ('edx', slice(2, 3)), 'dl': ('edx', slice(3, 4)),
    'ebx': ('ebx', slice(0, 4)), 'bx': ('ebx', slice(2, 4)),
    'bh': ('ebx', slice(2, 3)), 'bl': ('ebx', slice(3, 4)),
    'esp': ('esp', slice(0, 4)), 'sp': ('esp', slice(2, 4)),
    'ebp': ('ebp', slice(0, 4)), 'bp': ('ebp', slice(2, 4)),
    'esi': ('esi', slice(0, 4)), 'si': ('esi', slice(2, 4)),
    'edi': ('edi', slice(0, 4)), 'di': ('edi', slice(2, 4)),
}

datG.node('R', 'Root')
#datG.edge('R', '0-cmp', label='eax')
#datG.edge('R', '0-cmp', label='0xfffff001' )
datG.node('Out', 'Output')

# Splits an operand string on commas and trims surrounding whitespace.
pattern = re.compile(r"^\s+|\s*,\s*|\s+$")

for idx, operands in enumerate(instrEdges):
    splitStr = [tok for tok in pattern.split(operands) if tok]
    for idz, tok in enumerate(splitStr):
        tempNodeStr = instrNodes[idx]

        # Destination operand of a non-mov instruction: this instruction
        # becomes the newest producer of every byte slot it writes.
        if idz == 0 and 'mov' not in tempNodeStr and tok in _SUBREG:
            parent, slots = _SUBREG[tok]
            reg = _REG32[parent]
            reg[slots] = [nodes[idx]] * len(reg[slots])

        # cmp reads its first operand: draw an edge from the producer of
        # every byte slot of the compared (sub-)register, then mark that
        # all six status flags will be written by this instruction.
        if 'cmp' in tempNodeStr and idz == 0:
            if tok in _SUBREG:
                parent, slots = _SUBREG[tok]
                for ido, producer in enumerate(_REG32[parent][slots]):
                    datG.edge(producer, tempNodeStr,
                              label=str(producer) + '(' + tok + ')' + str(ido))
            else:
                # Non-register operand (immediate/memory): fall back to Root.
                datG.edge('R', tempNodeStr,
                          label='unhandledParam(misc cmp)' + str(-1))
            statusFlags = ['OF', 'SF', 'ZF', 'AF', 'CF', 'PF']

        # mov reads its second operand (the data source).
        if 'mov' in tempNodeStr and idz == 1:
            if tok in _SUBREG:
                parent, slots = _SUBREG[tok]
                for ido, producer in enumerate(_REG32[parent][slots]):
                    datG.edge(producer, tempNodeStr,
                              label=str(producer) + '(' + tok + ')' + str(ido))
            else:
                datG.edge('R', tempNodeStr,
                          label=str('unhandledParam') + '(misc mov)' + str(-1))

        # For every flag this instruction affects: edge to the Output node
        # and record this instruction as the newest writer of that flag.
        for idy, flag in enumerate(statusFlags):
            datG.edge(tempNodeStr, 'Out', label=tempNodeStr + ',' + str(flag))
            if flag == 'OF':
                newestOF = tempNodeStr + '-' + str(flag)
            elif flag == 'SF':
                newestSF = tempNodeStr + '-' + str(flag)
            elif flag == 'ZF':
                newestZF = tempNodeStr + '-' + str(flag)
            elif flag == 'AF':
                newestAF = tempNodeStr + '-' + str(flag)
            elif flag == 'CF':
                newestCF = tempNodeStr + '-' + str(flag)
            elif flag == 'PF':
                newestPF = tempNodeStr + '-' + str(flag)
        statusFlags = []
        # NOTE(review): reads newest* before any cmp has set them unless they
        # are pre-initialised earlier in the file — confirm.
        newFlagRegList = [newestOF, newestSF, newestZF, newestAF, newestCF,
                          newestPF]

add_nodes(datG)
#add_edges(datG)
print(datG.source)
src = Source(datG)
src.render('test-output/dataFlowSliceWes1.gv', view=True)
#some example graph code
'''
class Graph(object):
def __init__(self, graph_dict=None):
""" initializes a graph object
If no dictionary or None is given,
an empty dictionary will be used
"""
if graph_dict == None:
graph_dict = {}
self.__graph_dict = graph_dict
def vertices(self):
""" returns the vertices of a graph """
return list(self.__graph_dict.keys())
def edges(self):
""" returns the edges of a graph """
return self.__generate_edges()
def add_vertex(self, vertex):
""" If the vertex "vertex" is not in
self.__graph_dict, a key "vertex" with an empty
list as a value is added to the dictionary.
Otherwise nothing has to be done.
"""
if vertex not in self.__graph_dict:
self.__graph_dict[vertex] = []
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.__graph_dict:
self.__graph_dict[vertex1].append(vertex2)
else:
self.__graph_dict[vertex1] = [vertex2]
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbour in self.__graph_dict[vertex]:
if {neighbour, vertex} not in edges:
edges.append({vertex, neighbour})
return edges
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
if __name__ == "__main__":
f = { "a" : ["d"],
"b" : ["c"],
"c" : ["b", "c", "d", "e"],
"d" : ["a", "c"],
"e" : ["c"],
"f" : []
}
print(new_dict)
print(new_dict['0-cmp'])
graph = Graph(new_dict)
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print("Add vertex:")
graph.add_vertex("z")
print("Vertices of graph:")
print(graph.vertices())
print("Add an edge:")
graph.add_edge({"a","z"})
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print('Adding an edge {"x","y"} with new vertices:')
graph.add_edge({"x","y"})
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
'''
| WesCoomber/dataFlowGraphProjecto | presentgetBothNodesEdges.py | Python | mit | 22,404 |
# coding: utf-8
# In[ ]:
import numpy as np
import numexpr as ne
def sym_decorrelation_ne(W):
    """Symmetrically decorrelate *W* so that the result R satisfies R @ R.T == I.

    Computes (W W^T)^(-1/2) W via the eigendecomposition of the symmetric
    matrix W W^T.
    """
    eigvals, eigvecs = np.linalg.eigh(W @ W.T)
    # (W W^T)^(-1/2) rebuilt from the eigenpairs.
    inv_sqrt = eigvecs @ np.diag(1.0 / np.sqrt(eigvals)) @ eigvecs.T
    return inv_sqrt @ W
# Nonlinearity (negentropy-approximation) functions used by FastICA and
# their derivatives, evaluated elementwise with numexpr (``ne``) for speed.
# logcosh
def g_logcosh_ne(wx,alpha):
    """First derivative of log cosh: g(u) = tanh(alpha * u)."""
    return ne.evaluate('tanh(alpha * wx)')
def gprime_logcosh_ne(wx,alpha):
    """Second derivative of log cosh: g'(u) = alpha * (1 - tanh(alpha*u)^2)."""
    return alpha * (1-ne.evaluate('tanh(alpha*wx)**2'))
# exp
def g_exp_ne(wx,alpha):
    """First derivative of the Gaussian contrast: g(u) = u * exp(-u^2/2).

    Note: ``alpha`` is accepted for signature symmetry but unused here.
    """
    return ne.evaluate('wx * exp(-wx**2/2)')
def gprime_exp_ne(wx,alpha):
    """Second derivative of the Gaussian contrast: g'(u) = (1 - u^2) * exp(-u^2/2).

    Note: ``alpha`` is accepted for signature symmetry but unused here.
    """
    return (1-np.square(wx)) * ne.evaluate('exp(-wx**2/2)')
def fastica_s(X, f, alpha=None, n_comp=None, maxit=200, tol=1e-04):
    """Run symmetric FastICA on the data matrix *X*.

    Parameters
    ----------
    X : ndarray, shape (n, p)
        Observations in rows, mixed signals in columns.
    f : str
        Contrast function: ``"logcosh"`` or ``"exp"``.
    alpha : float, optional
        Steepness parameter passed to the logcosh nonlinearity.
    n_comp : int, optional
        Number of components to extract; clamped to ``min(n, p)``.
    maxit : int
        Maximum number of fixed-point iterations.
    tol : float
        Convergence tolerance on the change of the unmixing matrix.

    Returns
    -------
    dict with keys ``'X'`` (whitened data, transposed back) and ``'S'``
    (estimated sources).

    Raises
    ------
    ValueError
        If *f* is not a supported contrast function.
    """
    n, p = X.shape
    # Validate the requested number of components.
    if n_comp is None:
        n_comp = min(n, p)
    elif n_comp > min(n, p):
        print("n_comp is too large")
        n_comp = min(n, p)
    # Center: subtract each column's mean.
    X = X - X.mean(axis=0)[None, :]
    X = X.T
    # Whiten via SVD of the covariance matrix.
    s = np.linalg.svd(X @ (X.T) / n)
    D = np.diag(1 / np.sqrt(s[1]))
    k = D @ (s[0].T)
    k = k[:n_comp, :]
    X1 = k @ X
    # Random, then symmetrically decorrelated, initial unmixing matrix.
    w_init = np.random.normal(size=(n_comp, n_comp))
    W = sym_decorrelation_ne(w_init)
    lim = 1
    it = 0
    # Fixed-point FastICA iteration.
    while lim > tol and it < maxit:
        wx = W @ X1
        if f == "logcosh":
            gwx = g_logcosh_ne(wx, alpha)
            g_wx = gprime_logcosh_ne(wx, alpha)
        elif f == "exp":
            gwx = g_exp_ne(wx, alpha)
            # FIX: original called undefined ``gprimeg_exp_ne`` (NameError).
            g_wx = gprime_exp_ne(wx, alpha)
        else:
            # FIX: original printed a message and then crashed with a
            # NameError on ``gwx``; fail fast with a clear error instead.
            raise ValueError(
                "doesn't support this approximation negentropy function")
        W1 = np.dot(gwx, X1.T) / X1.shape[1] - np.dot(
            np.diag(g_wx.mean(axis=1)), W)
        W1 = sym_decorrelation_ne(W1)
        it = it + 1
        lim = np.max(np.abs(np.abs(np.diag(W1 @ W.T))) - 1.0)
        W = W1
    S = W @ X1
    #A = np.linalg.inv(W @ k)
    return {'X': X1.T, 'S': S.T}
| 663project/fastica_lz | fastica_lz/fastica_lz.py | Python | mit | 2,082 |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
    """Compound frequency strings collapse into a single combined offset."""
    freqstr = '2h30min'
    freqstr2 = '2h 30min'
    result = frequencies.to_offset(freqstr)
    # Whitespace between components is insignificant.
    assert(result == frequencies.to_offset(freqstr2))
    expected = offsets.Minute(150)
    assert(result == expected)
    freqstr = '2h30min15s'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Second(150 * 60 + 15)
    assert(result == expected)
    freqstr = '2h 60min'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Hour(3)
    assert(result == expected)
    freqstr = '15l500u'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Micro(15500)
    assert(result == expected)
    freqstr = '10s75L'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Milli(10075)
    assert(result == expected)
    freqstr = '2800N'
    result = frequencies.to_offset(freqstr)
    expected = offsets.Nano(2800)
    assert(result == expected)
    # malformed: a bad unit must raise, not parse partially
    try:
        frequencies.to_offset('2h20m')
    except ValueError:
        pass
    else:
        assert(False)

def test_to_offset_negative():
    """A leading minus sign negates the whole compound offset."""
    freqstr = '-1S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -1)
    freqstr = '-5min10s'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -310)

def test_to_offset_leading_zero():
    """Zero-valued leading components are ignored but parsed."""
    freqstr = '00H 00T 01S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == 1)
    freqstr = '-00H 03T 14S'
    result = frequencies.to_offset(freqstr)
    assert(result.n == -194)

def test_to_offset_pd_timedelta():
    """to_offset accepts Timedelta objects. Tests for #9064."""
    td = Timedelta(days=1, seconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Second(86401)
    assert(expected==result)
    td = Timedelta(days=-1, seconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Second(-86399)
    assert(expected==result)
    td = Timedelta(hours=1, minutes=10)
    result = frequencies.to_offset(td)
    expected = offsets.Minute(70)
    assert(expected==result)
    td = Timedelta(hours=1, minutes=-10)
    result = frequencies.to_offset(td)
    expected = offsets.Minute(50)
    assert(expected==result)
    td = Timedelta(weeks=1)
    result = frequencies.to_offset(td)
    expected = offsets.Day(7)
    assert(expected==result)
    td1 = Timedelta(hours=1)
    result1 = frequencies.to_offset(td1)
    result2 = frequencies.to_offset('60min')
    assert(result1 == result2)
    td = Timedelta(microseconds=1)
    result = frequencies.to_offset(td)
    expected = offsets.Micro(1)
    assert(expected == result)
    # A zero timedelta has no meaningful frequency.
    td = Timedelta(microseconds=0)
    tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))

def test_anchored_shortcuts():
    """Bare anchored aliases expand to their default anchor."""
    result = frequencies.to_offset('W')
    expected = frequencies.to_offset('W-SUN')
    assert(result == expected)
    result = frequencies.to_offset('Q')
    expected = frequencies.to_offset('Q-DEC')
    assert(result == expected)

# Shorthand used throughout the inference tests below.
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
    """Tests for ``frequencies.infer_freq`` across index types and rules.

    FIX: ``test_non_datetimeindex`` was defined twice; the second definition
    silently shadowed the first, so the to_datetime variant never ran.  The
    second is renamed ``test_non_datetimeindex_pydatetime`` below.
    """

    def test_raise_if_period_index(self):
        index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
        self.assertRaises(TypeError, frequencies.infer_freq, index)

    def test_raise_if_too_few(self):
        # At least 3 points are needed to infer a frequency.
        index = _dti(['12/31/1998', '1/3/1999'])
        self.assertRaises(ValueError, frequencies.infer_freq, index)

    def test_business_daily(self):
        index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
        self.assertEqual(frequencies.infer_freq(index), 'B')

    def test_day(self):
        self._check_tick(timedelta(1), 'D')

    def test_day_corner(self):
        index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
        self.assertEqual(frequencies.infer_freq(index), 'D')

    def test_non_datetimeindex(self):
        dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
        self.assertEqual(frequencies.infer_freq(dates), 'D')

    def test_hour(self):
        self._check_tick(timedelta(hours=1), 'H')

    def test_minute(self):
        self._check_tick(timedelta(minutes=1), 'T')

    def test_second(self):
        self._check_tick(timedelta(seconds=1), 'S')

    def test_millisecond(self):
        self._check_tick(timedelta(microseconds=1000), 'L')

    def test_microsecond(self):
        self._check_tick(timedelta(microseconds=1), 'U')

    def test_nanosecond(self):
        self._check_tick(np.timedelta64(1, 'ns'), 'N')

    def _check_tick(self, base_delta, code):
        """Verify inference of fixed-tick frequencies and their multiples."""
        b = Timestamp(datetime.now())
        for i in range(1, 5):
            inc = base_delta * i
            index = _dti([b + inc * j for j in range(3)])
            if i > 1:
                exp_freq = '%d%s' % (i, code)
            else:
                exp_freq = code
            self.assertEqual(frequencies.infer_freq(index), exp_freq)

        # An outlier at either end must defeat inference.
        index = _dti([b + base_delta * 7] +
                     [b + base_delta * j for j in range(3)])
        self.assertIsNone(frequencies.infer_freq(index))

        index = _dti([b + base_delta * j for j in range(3)] +
                     [b + base_delta * 7])
        self.assertIsNone(frequencies.infer_freq(index))

    def test_weekly(self):
        days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
        for day in days:
            self._check_generated_range('1/1/2000', 'W-%s' % day)

    def test_week_of_month(self):
        days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
        for day in days:
            for i in range(1, 5):
                self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))

    def test_fifth_week_of_month(self):
        # Only supports freq up to WOM-4. See #9425
        func = lambda: date_range('2014-01-01', freq='WOM-5MON')
        self.assertRaises(ValueError, func)

    def test_fifth_week_of_month_infer(self):
        # Only attempts to infer up to WOM-4. See #9425
        index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
        assert frequencies.infer_freq(index) is None

    def test_week_of_month_fake(self):
        # All of these dates are on same day of week and are 4 or 5 weeks apart
        index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29",
                               "2013-11-26"])
        assert frequencies.infer_freq(index) != 'WOM-4TUE'

    def test_monthly(self):
        self._check_generated_range('1/1/2000', 'M')

    def test_monthly_ambiguous(self):
        rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
        self.assertEqual(rng.inferred_freq, 'M')

    def test_business_monthly(self):
        self._check_generated_range('1/1/2000', 'BM')

    def test_business_start_monthly(self):
        self._check_generated_range('1/1/2000', 'BMS')

    def test_quarterly(self):
        for month in ['JAN', 'FEB', 'MAR']:
            self._check_generated_range('1/1/2000', 'Q-%s' % month)

    def test_annual(self):
        for month in MONTHS:
            self._check_generated_range('1/1/2000', 'A-%s' % month)

    def test_business_annual(self):
        for month in MONTHS:
            self._check_generated_range('1/1/2000', 'BA-%s' % month)

    def test_annual_ambiguous(self):
        rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
        self.assertEqual(rng.inferred_freq, 'A-JAN')

    def _check_generated_range(self, start, freq):
        """Round-trip: generate a range at *freq* and re-infer it.

        Quarterly anchors are equivalent in groups of three, so any member
        of the group is accepted.
        """
        freq = freq.upper()

        gen = date_range(start, periods=7, freq=freq)
        index = _dti(gen.values)
        if not freq.startswith('Q-'):
            self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
        else:
            inf_freq = frequencies.infer_freq(index)
            self.assertTrue((inf_freq == 'Q-DEC' and
                             gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
                                             'Q-MAR'))
                            or
                            (inf_freq == 'Q-NOV' and
                             gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
                            or
                            (inf_freq == 'Q-OCT' and
                             gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))

        gen = date_range(start, periods=5, freq=freq)
        index = _dti(gen.values)
        if not freq.startswith('Q-'):
            self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
        else:
            inf_freq = frequencies.infer_freq(index)
            self.assertTrue((inf_freq == 'Q-DEC' and
                             gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
                                             'Q-MAR'))
                            or
                            (inf_freq == 'Q-NOV' and
                             gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
                            or
                            (inf_freq == 'Q-OCT' and
                             gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))

    def test_infer_freq(self):
        rng = period_range('1959Q2', '2009Q3', freq='Q')
        rng = Index(rng.to_timestamp('D', how='e').asobject)
        self.assertEqual(rng.inferred_freq, 'Q-DEC')

        rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
        rng = Index(rng.to_timestamp('D', how='e').asobject)
        self.assertEqual(rng.inferred_freq, 'Q-NOV')

        rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
        rng = Index(rng.to_timestamp('D', how='e').asobject)
        self.assertEqual(rng.inferred_freq, 'Q-OCT')

    def test_infer_freq_tz(self):
        freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
                 'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
                 'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
                 'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
                 'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
                 'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
                 }
        # GH 7310
        for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
                   'US/Pacific', 'US/Eastern']:
            for expected, dates in compat.iteritems(freqs):
                idx = DatetimeIndex(dates, tz=tz)
                self.assertEqual(idx.inferred_freq, expected)

    def test_infer_freq_tz_transition(self):
        # Tests for #8772
        date_pairs = [['2013-11-02', '2013-11-5'],  # Fall DST
                      ['2014-03-08', '2014-03-11'],  # Spring DST
                      ['2014-01-01', '2014-01-03']]  # Regular Time
        freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U',
                 '3600000000001N']

        for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
                   'US/Pacific', 'US/Eastern']:
            for date_pair in date_pairs:
                for freq in freqs:
                    idx = date_range(date_pair[0], date_pair[1], freq=freq,
                                     tz=tz)
                    # FIX: removed stray debug print(idx) left in the loop.
                    self.assertEqual(idx.inferred_freq, freq)

        index = date_range("2013-11-03", periods=5,
                           freq="3H").tz_localize("America/Chicago")
        self.assertIsNone(index.inferred_freq)

    def test_infer_freq_businesshour(self):
        # GH 7905
        idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
                             '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
        # hourly freq in a day must result in 'H'
        self.assertEqual(idx.inferred_freq, 'H')

        idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
                             '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
                             '2014-07-01 15:00', '2014-07-01 16:00',
                             '2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
        self.assertEqual(idx.inferred_freq, 'BH')

        idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
                             '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
                             '2014-07-04 15:00', '2014-07-04 16:00',
                             '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
        self.assertEqual(idx.inferred_freq, 'BH')

        idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
                             '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
                             '2014-07-04 15:00', '2014-07-04 16:00',
                             '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
                             '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
                             '2014-07-07 15:00', '2014-07-07 16:00',
                             '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
                             '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
                             '2014-07-08 15:00', '2014-07-08 16:00'])
        self.assertEqual(idx.inferred_freq, 'BH')

    def test_not_monotonic(self):
        rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
        rng = rng[::-1]
        self.assertIsNone(rng.inferred_freq)

    def test_non_datetimeindex_pydatetime(self):
        # FIX: renamed from test_non_datetimeindex, which collided with the
        # method of the same name above and shadowed it.
        rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])

        vals = rng.to_pydatetime()

        result = frequencies.infer_freq(vals)
        self.assertEqual(result, rng.inferred_freq)

    def test_invalid_index_types(self):
        # test all index types
        for i in [tm.makeIntIndex(10),
                  tm.makeFloatIndex(10),
                  tm.makePeriodIndex(10)]:
            self.assertRaises(TypeError, lambda: frequencies.infer_freq(i))

        for i in [tm.makeStringIndex(10),
                  tm.makeUnicodeIndex(10)]:
            self.assertRaises(ValueError, lambda: frequencies.infer_freq(i))

    def test_string_datetimelike_compat(self):
        # GH 6463
        expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03',
                                           '2004-04'])
        result = frequencies.infer_freq(Index(['2004-01', '2004-02',
                                               '2004-03', '2004-04']))
        self.assertEqual(result, expected)

    def test_series(self):
        # GH6407
        # inferring series

        # invalid type of Series
        for s in [Series(np.arange(10)),
                  Series(np.arange(10.))]:
            self.assertRaises(TypeError, lambda: frequencies.infer_freq(s))

        # a non-convertible string
        self.assertRaises(ValueError,
                          lambda: frequencies.infer_freq(Series(['foo',
                                                                 'bar'])))

        # cannot infer on PeriodIndex
        for freq in [None, 'L', 'Y']:
            s = Series(period_range('2013', periods=10, freq=freq))
            self.assertRaises(TypeError, lambda: frequencies.infer_freq(s))

        # DateTimeIndex
        for freq in ['M', 'L', 'S']:
            s = Series(date_range('20130101', periods=10, freq=freq))
            inferred = frequencies.infer_freq(s)
            self.assertEqual(inferred, freq)

        s = Series(date_range('20130101', '20130110'))
        inferred = frequencies.infer_freq(s)
        self.assertEqual(inferred, 'D')
# Month abbreviations used by the annual/quarterly round-trip tests above.
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
          'OCT', 'NOV', 'DEC']


def test_is_superperiod_subperiod():
    """Each superperiod relation must hold together with its inverse."""
    assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
    assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))

    assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
    assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))

    assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
    assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))

    assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro()))
    assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli()))

    assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano()))
    assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro()))


if __name__ == '__main__':
    # Legacy nose entry point for running this module directly.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tseries/tests/test_frequencies.py | Python | mit | 16,705 |
# -*- coding: utf-8 -*-
"""
Module that implements the questions types
"""
import json
from . import errors
def question_factory(kind, *args, **kwargs):
    """Instantiate the Question subclass whose ``kind`` attribute matches.

    Raises errors.UnknownQuestionTypeError when no subclass declares *kind*.
    """
    known_types = (Text, Password, Confirm, List, Checkbox)
    matching = [cls for cls in known_types if cls.kind == kind]
    if not matching:
        raise errors.UnknownQuestionTypeError()
    return matching[0](*args, **kwargs)
def load_from_dict(question_dict):
    """
    Load one question from a dict.

    It requires the keys 'name' and 'kind'; any remaining keys are passed
    through to the matching Question subclass constructor.

    :return: The Question object with associated data.
    :return type: Question
    """
    return question_factory(**question_dict)
def load_from_list(question_list):
    """
    Load a list of questions from a list of dicts.

    Each dict requires the keys 'name' and 'kind'.

    :return: A list of Question objects with associated data.
    :return type: List
    """
    return list(map(load_from_dict, question_list))
def load_from_json(question_json):
    """
    Load Questions from a JSON string.

    :return: A list of Question objects with associated data if the JSON
             contains a list, or a single Question if it contains a dict.
    :return type: List or Dict
    :raises TypeError: if the top-level JSON value is neither list nor dict.
    """
    data = json.loads(question_json)
    if isinstance(data, list):
        return load_from_list(data)
    if isinstance(data, dict):
        return load_from_dict(data)
    # FIX: the original passed the %s argument to the TypeError constructor
    # instead of interpolating it, so the placeholder never got filled in.
    raise TypeError(
        'Json contained a %s variable when a dict or list was expected'
        % type(data))
class TaggedValue(object):
    """A choice with a human-readable *label* and an underlying *value*.

    Compares equal to its value (or to another TaggedValue with the same
    value), so answers can be checked against raw values transparently.
    """

    def __init__(self, label, value):
        self.label = label
        self.value = value

    def __str__(self):
        return self.label

    def __repr__(self):
        return self.value

    # FIX: the original defined Python-2-style __cmp__, which Python 3
    # ignores entirely (equality silently fell back to identity) and which
    # returned a boolean instead of -1/0/1 even under Python 2 semantics.
    def __eq__(self, other):
        if isinstance(other, TaggedValue):
            return self.value == other.value
        return self.value == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Keep hash consistent with __eq__ so TaggedValue and its raw value
        # collide in sets/dicts.
        return hash(self.value)
class Question(object):
    """Base class for all question types.

    Most attributes may be given either as plain values, as format strings
    referencing previous answers (``'{name}'``), or as callables taking the
    answers dict; ``_solve`` resolves all three forms uniformly.
    """

    kind = 'base question'

    def __init__(self,
                 name,
                 message='',
                 choices=None,
                 default=None,
                 ignore=False,
                 validate=True):
        self.name = name
        self._message = message
        self._choices = choices or []
        self._default = default
        self._ignore = ignore
        self._validate = validate
        # Answers gathered so far; injected by the prompting machinery.
        self.answers = {}

    @property
    def ignore(self):
        """Whether this question should be skipped (resolved to a bool)."""
        return bool(self._solve(self._ignore))

    @property
    def message(self):
        """The prompt text, with any dynamic parts resolved."""
        return self._solve(self._message)

    @property
    def default(self):
        """The default answer, with any dynamic parts resolved."""
        return self._solve(self._default)

    @property
    def choices_generator(self):
        """Yield each choice, wrapping (label, value) pairs as TaggedValue."""
        for choice in self._solve(self._choices):
            is_labelled_pair = isinstance(choice, tuple) and len(choice) == 2
            if is_labelled_pair:
                yield TaggedValue(*choice)
            else:
                yield choice

    @property
    def choices(self):
        """All choices materialised into a list."""
        return list(self.choices_generator)

    def validate(self, current):
        """Raise errors.ValidationError unless *current* passes validation.

        Any exception raised by a user-supplied validator counts as failure.
        """
        try:
            accepted = self._solve(self._validate, current)
        except Exception:
            accepted = False
        if not accepted:
            raise errors.ValidationError(current)

    def _solve(self, prop, *args, **kwargs):
        """Resolve *prop*: call it, format it against answers, or return it."""
        if callable(prop):
            return prop(self.answers, *args, **kwargs)
        if isinstance(prop, str):
            return prop.format(**self.answers)
        return prop
class Text(Question):
    """Free-form text input."""
    kind = 'text'


class Password(Question):
    """Text input with hidden echo."""
    kind = 'password'


class Confirm(Question):
    """Yes/no question; unlike the base class it defaults to False."""
    kind = 'confirm'

    def __init__(self, name, default=False, **kwargs):
        super(Confirm, self).__init__(name, default=default, **kwargs)


class List(Question):
    """Single selection from a list of choices."""
    kind = 'list'


class Checkbox(Question):
    """Multiple selection from a list of choices."""
    kind = 'checkbox'
| piton-package-manager/piton | piton/lib/inquirer/questions.py | Python | mit | 3,684 |
from os.path import abspath, dirname, join
from unittest.mock import MagicMock, Mock, call
from tests.common import NetflixTestFixture
from netflix.data.genre import NetflixGenre
from netflix.parsers.title import NetflixTitleParser
from netflix.utils import netflix_url
class TestNetflixTitleParser(NetflixTestFixture):
    """Parser tests driven by saved HTML fixtures next to this test file."""

    def setUp(self):
        # Load one TV-show page and one movie page fixture per test.
        self.tv_title_string = self._read("tv_title.html")
        self.movie_title_string = self._read("movie_title.html")

    def tearDown(self):
        pass

    def _read(self, filename):
        # Resolve the fixture relative to this test module, not the CWD.
        input_path = join(dirname(abspath(__file__)), filename)
        with open(input_path) as f:
            return f.read()

    def test_get_tv_title(self):
        parser = NetflixTitleParser()
        title = parser.get_title(self.tv_title_string)
        self.assertEqual(title.title, "House of Cards")
        self.assertEqual(title.title_id, 70178217)
        self.assertEqual(title.year, 2013)
        self.assertEqual(title.maturity, "Adult")
        # TV titles report duration as a season count.
        self.assertEqual(title.duration, "4 Seasons")
        self.assertEqual(title.description, "Foo. Bar. Baz.")
        self.assertEqual(title.background_url, "https://scdn.nflximg.net/ipl/29399/578030fdb9fc2b6de6f3d47b2f347da96a5da95c.jpg")

    def test_get_movie_title(self):
        parser = NetflixTitleParser()
        title = parser.get_title(self.movie_title_string)
        self.assertEqual(title.title, "Mad Max: Fury Road")
        self.assertEqual(title.title_id, 80025919)
        self.assertEqual(title.year, 2015)
        self.assertEqual(title.maturity, "15")
        # Movies report duration in minutes.
        self.assertEqual(title.duration, "120m")
        self.assertEqual(title.description, "Foo. Bar. Baz.")
        self.assertEqual(title.background_url, "https://scdn.nflximg.net/ipl/20552/de6776c61b5509db1c0a028ae81fc71b15bd6ef5.jpg")
| dstenb/pylaunchr-netflix | tests/parsers/title_test.py | Python | mit | 1,822 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HNacExampleRuleLHS(HimesisPreConditionPatternLHS):
    """
    Auto-generated Himesis pre-condition pattern graph (LHS) for
    NacExampleRule: 3 nodes, 2 edges, plus one NAC.  Every generated
    attribute evaluator and the pattern constraint below simply return
    True, so matching is structurally driven only.
    """

    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HNacExampleRuleLHS.
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HNacExampleRuleLHS, self).__init__(name='HNacExampleRuleLHS', num_nodes=3, edges=[])
        # Add the edges
        self.add_edges([(2, 0), (0, 1)])
        # Set the graph attributes
        # NOTE: the pickle.loads(...) payloads below are generated data blobs;
        # do not edit them by hand.
        self["mm__"] = pickle.loads("""(lp1
S'MT_pre__PoliceStationMM'
p2
aS'MoTifRule'
p3
a.""")
        self["MT_constraint__"] = pickle.loads("""V#===============================================================================\u000a# This code is executed after the nodes in the LHS have been matched.\u000a# You can access a matched node labelled n by: PreNode('n').\u000a# To access attribute x of node n, use: PreNode('n')['x'].\u000a# The given constraint must evaluate to a boolean expression:\u000a# returning True enables the rule to be applied,\u000a# returning False forbids the rule from being applied.\u000a#===============================================================================\u000a\u000areturn True\u000a
p1
.""")
        self["name"] = """"""
        self["GUID__"] = UUID('83a57be0-5686-4b72-97a9-c7537819e243')
        # Set the node attributes
        # --- node 0 (match label 3): MT_pre__directLink_S association ---
        self.vs[0]["MT_subtypeMatching__"] = False
        self.vs[0]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[0]["MT_label__"] = """3"""
        self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[0]["mm__"] = """MT_pre__directLink_S"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["GUID__"] = UUID('e566fd0c-10db-4ce2-8256-f3515998bc22')
        # --- node 1 (match label 2): MT_pre__Male_S class ---
        self.vs[1]["MT_subtypeMatching__"] = False
        self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[1]["mm__"] = """MT_pre__Male_S"""
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[1]["GUID__"] = UUID('807712d0-3d4f-416e-a4f3-94a6f36884b7')
        # --- node 2 (match label 1): MT_pre__Station_S class ---
        self.vs[2]["MT_subtypeMatching__"] = False
        self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["MT_label__"] = """1"""
        self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
        self.vs[2]["mm__"] = """MT_pre__Station_S"""
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
        self.vs[2]["GUID__"] = UUID('3511c637-3093-4ff7-ae80-7531122a176d')
        # Load the NACs
        from HNacExampleRuleNAC0 import HNacExampleRuleNAC0
        self.NACs = [HNacExampleRuleNAC0(LHS=self)]

    def eval_associationType3(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_classtype2(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_name2(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_cardinality2(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_classtype1(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_name1(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def eval_cardinality1(self, attr_value, this):
        """Generated attribute-constraint stub; always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================
        return True

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        # returning True enables the rule to be applied,
        # returning False forbids the rule from being applied.
        #===============================================================================
        return True
| levilucio/SyVOLT | tests/TestModules/HNacExampleRuleLHS.py | Python | mit | 12,702 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from config_loader import try_load_from_file
# Placeholder connection settings; overridden below when a config file exists.
config = {
    "ip": "<oneview_ip>",
    "credentials": {
        "userName": "<username>",
        "password": "<password>"
    }
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
print("\n Querying system for login details\n")
# Fetch the appliance's login details and pretty-print them.
login_detail = oneview_client.login_details.get_login_details()
print("\n Login details are: \n")
pprint(login_detail)
| HewlettPackard/python-hpOneView | examples/login_details.py | Python | mit | 1,720 |
#!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info, debug
from mininet.node import Host, RemoteController, OVSSwitch
# Runtime directory for Quagga pid/api files.
# Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu)
QUAGGA_RUN_DIR = '/var/run/quagga'
# Directories holding the bgpd (quagga*.conf) and zebra configuration files.
QCONFIG_DIR = 'configs'
ZCONFIG_DIR = 'configs'
class SdnIpHost(Host):
    """Mininet host that installs a static default route when configured."""

    def __init__(self, name, ip, route, *args, **kwargs):
        Host.__init__(self, name, ip=ip, *args, **kwargs)
        # Gateway address, applied later by config().
        self.route = route

    def config(self, **kwargs):
        Host.config(self, **kwargs)
        gateway = self.route
        debug("configuring route %s" % gateway)
        self.cmd('ip route add default via %s' % gateway)
class Router(Host):
    """Mininet host that runs a Quagga routing stack (zebra + bgpd)."""

    def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs):
        Host.__init__(self, name, *args, **kwargs)
        # Paths to the bgpd and zebra configuration files for this router.
        self.quaggaConfFile = quaggaConfFile
        self.zebraConfFile = zebraConfFile
        # Maps interface name -> {'mac': <optional>, 'ipAddrs': [CIDR, ...]}.
        self.intfDict = intfDict

    def config(self, **kwargs):
        Host.config(self, **kwargs)
        # Act as a router: enable kernel IP forwarding.
        self.cmd('sysctl net.ipv4.ip_forward=1')
        for intf, attrs in self.intfDict.items():
            # Start each interface from a clean slate.
            self.cmd('ip addr flush dev %s' % intf)
            # setup mac address to specific interface
            if 'mac' in attrs:
                # Link must be down while the MAC address is changed.
                self.cmd('ip link set %s down' % intf)
                self.cmd('ip link set %s address %s' % (intf, attrs['mac']))
                self.cmd('ip link set %s up ' % intf)
            # setup address to interfaces
            for addr in attrs['ipAddrs']:
                self.cmd('ip addr add %s dev %s' % (addr, intf))
        # Launch the zebra and bgpd daemons; api/pid files are named per router
        # so several routers can share QUAGGA_RUN_DIR.
        self.cmd('zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid' % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
        self.cmd('bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid' % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))

    def terminate(self):
        # Kill this router's zebra/bgpd daemons before tearing down the host.
        self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill" % (self.name, self.name))
        Host.terminate(self)
class SdnIpTopo(Topo):
    """Single-switch topology with two Quagga BGP routers attached.

    The original build() duplicated the per-router setup verbatim; the
    common code now lives in _add_quagga_router() so adding a third router
    is a one-line change.
    """

    def build(self):
        """Create switch s1 and attach the two Quagga routers to it."""
        zebraConf = '{}/zebra.conf'.format(ZCONFIG_DIR)
        s1 = self.addSwitch('s1', dpid='0000000000000001', cls=OVSSwitch, failMode="standalone")
        # Per-router parameters: (index used in names/config files, MAC, address).
        for index, mac, ip_addr in ((1, '00:00:00:00:00:01', '10.0.1.1/24'),
                                    (2, '00:00:00:00:00:02', '10.0.2.1/24')):
            router = self._add_quagga_router(index, mac, ip_addr, zebraConf)
            self.addLink(router, s1)

    def _add_quagga_router(self, index, mac, ip_addr, zebraConf):
        """Add one Quagga router host named 'bgpq<index>' with one interface.

        :param index: router number; selects host name and quagga<index>.conf
        :param mac: MAC address for the router's single interface
        :param ip_addr: CIDR address for that interface
        :param zebraConf: path of the shared zebra configuration file
        :returns: the node name returned by Topo.addHost
        """
        bgpEth0 = {
            'mac': mac,
            'ipAddrs': [ip_addr],
        }
        bgpIntfs = {'bgpq%d-eth0' % index: bgpEth0}
        return self.addHost('bgpq%d' % index, cls=Router,
                            quaggaConfFile='{}/quagga{}.conf'.format(QCONFIG_DIR, index),
                            zebraConfFile=zebraConf,
                            intfDict=bgpIntfs)
# Allow `mn --custom <this file> --topo sdnip` to find the topology.
topos = {'sdnip': SdnIpTopo}

if __name__ == '__main__':
    setLogLevel('debug')
    topo = SdnIpTopo()
    # Switch is controlled by an external (remote) OpenFlow controller.
    net = Mininet(topo=topo, controller=RemoteController)
    net.start()
    # Drop into the interactive Mininet CLI; tear down on exit.
    CLI(net)
    net.stop()
    info("done\n")
| TakeshiTseng/SDN-Work | mininet/bgp/topo.py | Python | mit | 3,549 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import PeeringManagementClientConfiguration
from .operations import PeeringManagementClientOperationsMixin
from .operations import LegacyPeeringsOperations
from .operations import Operations
from .operations import PeerAsnsOperations
from .operations import PeeringLocationsOperations
from .operations import PeeringsOperations
from .operations import PeeringServiceLocationsOperations
from .operations import PeeringServicePrefixesOperations
from .operations import PrefixesOperations
from .operations import PeeringServiceProvidersOperations
from .operations import PeeringServicesOperations
from .. import models
class PeeringManagementClient(PeeringManagementClientOperationsMixin):
    """Peering Client.

    :ivar legacy_peerings: LegacyPeeringsOperations operations
    :vartype legacy_peerings: azure.mgmt.peering.aio.operations.LegacyPeeringsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.peering.aio.operations.Operations
    :ivar peer_asns: PeerAsnsOperations operations
    :vartype peer_asns: azure.mgmt.peering.aio.operations.PeerAsnsOperations
    :ivar peering_locations: PeeringLocationsOperations operations
    :vartype peering_locations: azure.mgmt.peering.aio.operations.PeeringLocationsOperations
    :ivar peerings: PeeringsOperations operations
    :vartype peerings: azure.mgmt.peering.aio.operations.PeeringsOperations
    :ivar peering_service_locations: PeeringServiceLocationsOperations operations
    :vartype peering_service_locations: azure.mgmt.peering.aio.operations.PeeringServiceLocationsOperations
    :ivar peering_service_prefixes: PeeringServicePrefixesOperations operations
    :vartype peering_service_prefixes: azure.mgmt.peering.aio.operations.PeeringServicePrefixesOperations
    :ivar prefixes: PrefixesOperations operations
    :vartype prefixes: azure.mgmt.peering.aio.operations.PrefixesOperations
    :ivar peering_service_providers: PeeringServiceProvidersOperations operations
    :vartype peering_service_providers: azure.mgmt.peering.aio.operations.PeeringServiceProvidersOperations
    :ivar peering_services: PeeringServicesOperations operations
    :vartype peering_services: azure.mgmt.peering.aio.operations.PeeringServicesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Azure subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = PeeringManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class with the (de)serializers.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Bind each operation group to the shared pipeline and serializers.
        self.legacy_peerings = LegacyPeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peer_asns = PeerAsnsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peering_locations = PeeringLocationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peerings = PeeringsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peering_service_locations = PeeringServiceLocationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peering_service_prefixes = PeeringServicePrefixesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.prefixes = PrefixesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peering_service_providers = PeeringServiceProvidersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.peering_services = PeeringServicesOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Expand {subscriptionId} in the request URL before dispatch.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Shut down the underlying pipeline/transport.
        await self._client.close()

    async def __aenter__(self) -> "PeeringManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/peering/azure-mgmt-peering/azure/mgmt/peering/aio/_peering_management_client.py | Python | mit | 6,754 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-28 01:00
from __future__ import unicode_literals
import caching.base
from django.db import migrations, models
import django.db.models.deletion
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    """Initial migration: creates Organization, OrganizationLink, Person and
    PersonLink (auto-generated by Django 1.9.1 -- edit with care)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('is_live', models.BooleanField(default=True, verbose_name='Display on site')),
                ('show_in_lists', models.BooleanField(default=True, verbose_name='Show on Organization list page')),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField(unique=True)),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Email address')),
                ('twitter_username', models.CharField(blank=True, max_length=32)),
                ('github_username', models.CharField(blank=True, max_length=32)),
                ('github_repos_num', models.PositiveIntegerField(blank=True, null=True)),
                ('github_gists_num', models.PositiveIntegerField(blank=True, null=True)),
                ('homepage', models.URLField(blank=True)),
                ('description', models.TextField(blank=True)),
                ('address', models.CharField(blank=True, max_length=255)),
                ('city', models.CharField(blank=True, max_length=64)),
                ('state', models.CharField(blank=True, max_length=32)),
                ('country', models.CharField(blank=True, help_text='Only necessary if outside the U.S.', max_length=32)),
                ('logo', sorl.thumbnail.fields.ImageField(blank=True, help_text='Resized to fit 200x50 box in template', null=True, upload_to='img/uploads/org_logos')),
            ],
            options={
                'ordering': ('name',),
            },
            # CachingMixin adds query caching on top of the plain model base.
            bases=(caching.base.CachingMixin, models.Model),
        ),
        migrations.CreateModel(
            name='OrganizationLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=128)),
                ('url', models.URLField()),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.Organization')),
            ],
            options={
                'verbose_name': 'Organization Link',
                'ordering': ('organization', 'name'),
            },
            bases=(caching.base.CachingMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('is_live', models.BooleanField(default=True, verbose_name='Display on site')),
                ('show_in_lists', models.BooleanField(default=True, verbose_name='Show on People list page')),
                ('first_name', models.CharField(max_length=128)),
                ('last_name', models.CharField(max_length=128)),
                ('slug', models.SlugField(unique=True)),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Email address')),
                ('twitter_username', models.CharField(blank=True, max_length=32)),
                ('twitter_bio', models.TextField(blank=True)),
                ('twitter_profile_image_url', models.URLField(blank=True)),
                ('github_username', models.CharField(blank=True, max_length=32)),
                ('github_repos_num', models.PositiveIntegerField(blank=True, null=True)),
                ('github_gists_num', models.PositiveIntegerField(blank=True, null=True)),
                ('description', models.TextField(blank=True, verbose_name='Bio')),
                ('organizations', models.ManyToManyField(blank=True, to='people.Organization')),
            ],
            options={
                'verbose_name_plural': 'People',
                'ordering': ('last_name', 'first_name'),
            },
            bases=(caching.base.CachingMixin, models.Model),
        ),
        migrations.CreateModel(
            name='PersonLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=128)),
                ('url', models.URLField()),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.Person')),
            ],
            options={
                'verbose_name': 'Person Link',
                'ordering': ('person', 'name'),
            },
            bases=(caching.base.CachingMixin, models.Model),
        ),
    ]
| OpenNews/opennews-source | source/people/migrations/0001_initial.py | Python | mit | 5,559 |
#!/usr/bin/env python
from __future__ import print_function
################################################################################
#
#
# drmaa_wrapper.py
#
# Copyright (C) 2013 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Portions of code from adapted from:
#
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# Courtesy of J.F. Sebastian
# Use is licensed under the "Creative Commons Attribution Share Alike license"
# See http://stackexchange.com/legal
#
#################################################################################
"""
********************************************
:mod:`ruffus.cmdline` -- Overview
********************************************
.. moduleauthor:: Leo Goodstadt <ruffus@llew.org.uk>
#
# Using drmaa
#
from ruffus import *
import drmaa_wrapper
"""
import sys, os
import stat
#
# tempfile for drmaa scripts
#
import tempfile
import datetime
import subprocess
import time
import sys
import subprocess
import threading
# Queue moved to lowercase 'queue' in Python 3.
try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty # python 3.x

# True when running on a POSIX-style operating system.
ON_POSIX = 'posix' in sys.builtin_module_names

if sys.hexversion >= 0x03000000:
    # everything is unicode in python3
    path_str_type = str
else:
    # Python 2: a path may be either str or unicode.
    path_str_type = basestring
#_________________________________________________________________________________________
# error_drmaa_job
#_________________________________________________________________________________________
class error_drmaa_job(Exception):
    """Raised for every error reported by this module."""

    def __init__(self, *errmsg):
        # Forward all message fragments to the base Exception unchanged.
        super(error_drmaa_job, self).__init__(*errmsg)
#_________________________________________________________________________________________
# read_stdout_stderr_from_files
#_________________________________________________________________________________________
def read_stdout_stderr_from_files( stdout_path, stderr_path, logger = None, cmd_str = "", tries=5):
    """
    Read the contents of the two specified paths and return them as lists of lines.

    Thanks to paranoia approach contributed by Andreas Heger:
        Retry just in case file system hasn't committed.
    Logs a warning (when a logger is supplied) if either file is missing:
        No big deal?
    Cleans up both files afterwards.

    :param stdout_path: path of the captured stdout file
    :param stderr_path: path of the captured stderr file
    :param logger: optional logger used to warn about unreadable files
    :param cmd_str: command string included in warning messages
    :param tries: number of times the files are polled before giving up
    :returns: tuple of (stdout lines, stderr lines)
    """
    #
    # delay up to 2 * tries seconds until both files are ready
    #
    for _ in range(tries):
        if os.path.exists(stdout_path) and os.path.exists(stderr_path):
            break
        time.sleep(2)

    def _read_lines(path, stream_name):
        # "with" guarantees the handle is closed (the original leaked it).
        try:
            with open(path, "r") as ff:
                return ff.readlines()
        except IOError:
            exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
            msg = str(exceptionValue)
            if logger:
                logger.warning("could not open %s: %s for \n%s" % (stream_name, msg, cmd_str))
            return []

    stdout = _read_lines(stdout_path, "stdout")
    stderr = _read_lines(stderr_path, "stderr")

    #
    # cleanup ignoring errors
    #
    try:
        os.unlink(stdout_path)
        os.unlink(stderr_path)
    except OSError:
        pass

    return stdout, stderr
#_________________________________________________________________________________________
# setup_drmaa_job
#_________________________________________________________________________________________
def setup_drmaa_job( drmaa_session, job_name, job_environment, working_directory, job_other_options):
    """
    Build and return a drmaa job template for a single job.

    :param drmaa_session: active drmaa session used to create the template
    :param job_name: job name; a falsy value yields a timestamped default
    :param job_environment: optional dict of environment variables,
        e.g. { 'BASH_ENV' : '~/.bashrc' }
    :param working_directory: job working directory (defaults to os.getcwd())
    :param job_other_options: native specification string for the queue manager
    """
    job_template = drmaa_session.createJobTemplate()

    # Default to the current directory when none is supplied.
    job_template.workingDirectory = working_directory if working_directory else os.getcwd()

    if job_environment:
        job_template.jobEnvironment = job_environment

    job_template.args = []

    # nameless jobs sometimes breaks drmaa implementations...
    if job_name:
        job_template.jobName = job_name
    else:
        timestamp = "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))
        job_template.jobName = "ruffus_job_" + timestamp

    #
    # optional job parameters
    #
    job_template.nativeSpecification = job_other_options

    # separate stdout and stderr
    job_template.joinFiles = False

    return job_template
#_________________________________________________________________________________________
# write_job_script_to_temp_file
#_________________________________________________________________________________________
def write_job_script_to_temp_file( cmd_str, job_script_directory, job_name, job_other_options, job_environment, working_directory):
    '''
    Write cmd_str to an executable #!/bin/sh script in job_script_directory.

    The job parameters are recorded as comments inside the script for later
    debugging. Returns (job_script_path, stdout_path, stderr_path); the
    stdout/stderr paths are derived from the script path but not created here.
    '''
    time_stmp_str = "_".join(map(str, datetime.datetime.now().timetuple()[0:6]))

    # create script directory if necessary
    # Ignore "already exists" errors rather than test for existence to avoid race conditions
    # (BUGFIX: narrowed from a bare except; also dropped an unused local "import sys")
    try:
        os.makedirs(job_script_directory)
    except OSError:
        pass

    tmpfile = tempfile.NamedTemporaryFile(mode='w', prefix='drmaa_script_' + time_stmp_str + "__", dir = job_script_directory, delete = False)

    #
    #   hopefully #!/bin/sh is universally portable among unix-like operating systems
    #
    tmpfile.write( "#!/bin/sh\n" )

    #
    #   log parameters as suggested by Bernie Pope
    #
    for title, parameter in ( ("job_name", job_name, ),
                              ("job_other_options", job_other_options,),
                              ("job_environment", job_environment, ),
                              ("working_directory", working_directory), ):
        if parameter:
            tmpfile.write( "#%s=%s\n" % (title, parameter))

    tmpfile.write( cmd_str + "\n" )
    tmpfile.close()

    job_script_path = os.path.abspath( tmpfile.name )
    stdout_path = job_script_path + ".stdout"
    stderr_path = job_script_path + ".stderr"

    # make the wrapper script executable by owner and group
    os.chmod( job_script_path, stat.S_IRWXG | stat.S_IRWXU )

    return (job_script_path, stdout_path, stderr_path)
#_________________________________________________________________________________________
# run_job_using_drmaa
#_________________________________________________________________________________________
def run_job_using_drmaa (cmd_str, job_name = None, job_other_options = "", job_script_directory = None, job_environment = None, working_directory = None, retain_job_scripts = False, logger = None, drmaa_session = None, verbose = 0):
    """
    Runs specified command remotely using drmaa,
    either with the specified session, or the module shared drmaa session

    Returns (stdout, stderr) as lists of lines.
    Raises error_drmaa_job when the job was aborted, killed by a signal,
    or exited with a non-zero status.
    """
    import drmaa

    #
    #   used specified session else module session
    #
    if drmaa_session is None:
        raise error_drmaa_job( "Please specify a drmaa_session in run_job()")

    #
    #   make job template
    #
    job_template = setup_drmaa_job( drmaa_session, job_name, job_environment, working_directory, job_other_options)

    #
    #   make job script
    #
    if not job_script_directory:
        job_script_directory = os.getcwd()
    job_script_path, stdout_path, stderr_path = write_job_script_to_temp_file( cmd_str, job_script_directory, job_name, job_other_options, job_environment, working_directory)
    job_template.remoteCommand = job_script_path
    # drmaa paths specified as [hostname]:file_path.
    # See http://www.ogf.org/Public_Comment_Docs/Documents/2007-12/ggf-drmaa-idl-binding-v1%2000%20RC7.pdf
    job_template.outputPath = ":" + stdout_path
    job_template.errorPath = ":" + stderr_path

    #
    #   Run job and wait
    #
    jobid = drmaa_session.runJob(job_template)
    if logger:
        logger.debug( "job has been submitted with jobid %s" % str(jobid ))

    try:
        job_info = drmaa_session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
    except Exception:
        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
        msg = str(exceptionValue)
        # ignore message 24 in PBS
        # code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.":
        if not msg.startswith("code 24"): raise
        if logger:
            # BUGFIX: the original omitted the "%" operator (so this call raised
            # TypeError), referenced the non-existent msg.message, and logged the
            # literal text "jobid=jobid" instead of the actual jobid.
            logger.info("Warning %s\n"
                        "The original command was:\n%s\njobid=%s\n" %
                        (msg, cmd_str, jobid) )
        job_info = None

    #
    #   Read output
    #
    stdout, stderr = read_stdout_stderr_from_files( stdout_path, stderr_path, logger, cmd_str)

    job_info_str = ("The original command was: >> %s <<\n"
                    "The jobid was: %s\n"
                    "The job script name was: %s\n" %
                    (cmd_str,
                     jobid,
                     job_script_path))

    def stderr_stdout_to_str (stderr, stdout):
        """
        Concatenate stdout and stderr to string
        """
        result = ""
        if stderr:
            result += "The stderr was: \n%s\n\n" % ("".join( stderr))
        if stdout:
            result += "The stdout was: \n%s\n\n" % ("".join( stdout))
        return result

    #
    #   Throw if failed
    #
    if job_info:
        job_info_str += "Resources used: %s " % (job_info.resourceUsage)
        if job_info.wasAborted:
            raise error_drmaa_job( "The drmaa command was never ran but used %s:\n%s"
                                   % (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
        elif job_info.hasSignal:
            raise error_drmaa_job( "The drmaa command was terminated by signal %i:\n%s"
                                   % (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
        elif job_info.hasExited:
            if job_info.exitStatus:
                # BUGFIX: a non-zero exit status is not a signal -- report it accurately
                raise error_drmaa_job( "The drmaa command exited with non-zero status %i:\n%s"
                                       % (job_info.exitStatus, job_info_str + stderr_stdout_to_str (stderr, stdout)))
            #
            #   Decorate normal exit with some resource usage information
            #
            elif verbose and logger:
                # BUGFIX: guard on logger -- the original dereferenced logger here
                # even when it was None
                def nice_mem_str(num):
                    """
                    Format memory sizes
                    http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
                    """
                    num = float(num)
                    for x in ['bytes','KB','MB','GB']:
                        if num < 1024.0:
                            return "%3.1f%s" % (num, x)
                        num /= 1024.0
                    return "%3.1f%s" % (num, 'TB')

                try:
                    resource_usage_str = []
                    if 'maxvmem' in job_info.resourceUsage:
                        if 'mem' in job_info.resourceUsage:
                            resource_usage_str.append("Mem=%s(%s)" % (nice_mem_str(job_info.resourceUsage['maxvmem']), job_info.resourceUsage['mem']))
                        else:
                            resource_usage_str.append("Mem=%s" % nice_mem_str(job_info.resourceUsage['maxvmem']))
                    if 'ru_wallclock' in job_info.resourceUsage:
                        resource_usage_str.append("CPU wallclock= %.2gs" % float(job_info.resourceUsage['ru_wallclock']))
                    if len(resource_usage_str):
                        logger.info("Drmaa command used %s in running %s" % (", ".join(resource_usage_str), cmd_str))
                    else:
                        logger.info("Drmaa command successfully ran %s" % cmd_str)
                except Exception:
                    # best effort only: fall back to logging the raw resource dict
                    logger.info("Drmaa command used %s in running %s" % (job_info.resourceUsage, cmd_str))

    #
    #   clean up job template
    #
    drmaa_session.deleteJobTemplate(job_template)

    #
    #   Cleanup job script unless retain_job_scripts is set
    #
    if retain_job_scripts:
        # job scripts have the jobid as an extension
        os.rename(job_script_path, job_script_path + ".%s" % jobid )
    else:
        try:
            os.unlink( job_script_path )
        except OSError:
            if logger:
                logger.warning( "Temporary job script wrapper '%s' missing (and ignored) at clean-up" % job_script_path )

    return stdout, stderr
def enqueue_output(out, queue, echo):
    """
    Drain *out* line by line into *queue*, optionally mirroring each line
    to the *echo* stream (flushed immediately), then close *out*.
    """
    line = out.readline()
    while line != '':
        queue.put(line)
        if echo is not None:
            echo.write(line)
            echo.flush()
        line = out.readline()
    out.close()
#_________________________________________________________________________________________
# run_job_locally
#_________________________________________________________________________________________
def run_job_locally (cmd_str, logger = None, job_environment = None, working_directory = None, local_echo = False):
    """
    Runs specified command locally instead of drmaa

    stdout and stderr are captured line by line on daemon threads (so they
    can optionally be echoed live via local_echo) and returned as lists of
    lines. Raises error_drmaa_job on a non-zero return code.
    """
    # BUGFIX: drain loops below previously used bare "except:", which would
    # also swallow KeyboardInterrupt/SystemExit; catch only queue.Empty.
    try:
        from queue import Empty          # Python 3
    except ImportError:
        from Queue import Empty          # Python 2

    popen_params = {"args"      : cmd_str,
                    "cwd"       : working_directory if working_directory is not None else os.getcwd(),
                    "shell"     : True,
                    "stdin"     : subprocess.PIPE,
                    "stdout"    : subprocess.PIPE,
                    "stderr"    : subprocess.PIPE,
                    "bufsize"   : 1,
                    "universal_newlines" : True,
                    "close_fds" : ON_POSIX}
    if job_environment is not None:
        popen_params["env"] = job_environment

    process = subprocess.Popen( **popen_params )
    stderrQ = Queue()
    stdoutQ = Queue()
    stdout_t = threading.Thread(target=enqueue_output, args=(process.stdout, stdoutQ, sys.stdout if local_echo else None))
    stderr_t = threading.Thread(target=enqueue_output, args=(process.stderr, stderrQ, sys.stderr if local_echo else None))
    # if daemon = False, sub process cannot be interrupted by Ctrl-C
    stdout_t.daemon = True
    stderr_t.daemon = True
    stdout_t.start()
    stderr_t.start()
    process.wait()
    stdout_t.join()
    stderr_t.join()
    process.stdin.close()
    process.stdout.close()
    process.stderr.close()

    # non-blocking drain: Empty marks the end of each queue
    stdout, stderr = [], []
    try:
        while True:
            stdout.append(stdoutQ.get(False))
    except Empty:
        pass
    try:
        while True:
            stderr.append(stderrQ.get(False))
    except Empty:
        pass

    if process.returncode != 0:
        raise error_drmaa_job( "The locally run command was terminated by signal %i:\n"
                               "The original command was:\n%s\n"
                               "The stderr was: \n%s\n\n"
                               "The stdout was: \n%s\n\n" %
                               (-process.returncode, cmd_str, "".join(stderr), "".join(stdout)) )

    return stdout, stderr
#_________________________________________________________________________________________
# touch_output_files
#_________________________________________________________________________________________
def touch_output_files (cmd_str, output_files, logger = None):
    """
    Touches output files instead of actually running the command string

    output_files may be a single name or an arbitrarily nested list/tuple of
    names; non-string entries are ignored. Missing files are created empty,
    existing files get their mtime updated.
    """

    if not output_files or not len(output_files):
        if logger:
            # BUGFIX: the original never interpolated cmd_str, logging a literal "%s"
            logger.debug("No output files to 'touch' for command:\n%s" % cmd_str)
        return

    # make sure is list
    ltypes=(list, tuple)
    if not isinstance(output_files, ltypes):
        output_files = [output_files]
    else:
        output_files = list(output_files)

    #
    #   flatten list of file names
    #   from http://rightfootin.blogspot.co.uk/2006/09/more-on-python-flatten.html
    #
    i = 0
    while i < len(output_files):
        while isinstance(output_files[i], ltypes):
            if not output_files[i]:
                output_files.pop(i)
                i -= 1
                break
            else:
                output_files[i:i + 1] = output_files[i]
        i += 1

    for f in output_files:
        # ignore non strings
        if not isinstance (f, path_str_type):
            continue

        # create file
        if not os.path.exists(f):
            # this should be guaranteed to close the new file immediately
            with open(f, 'w') as p: pass

        # touch existing file
        else:
            os.utime(f, None)
#_________________________________________________________________________________________
# run_job
#_________________________________________________________________________________________
def run_job(cmd_str, job_name = None, job_other_options = None, job_script_directory = None,
            job_environment = None, working_directory = None, logger = None,
            drmaa_session = None, retain_job_scripts = False,
            run_locally = False, output_files = None, touch_only = False, verbose = 0, local_echo = False):
    """
    Dispatch cmd_str to the appropriate runner: simulation (touch the output
    files only), local execution, or remote execution via drmaa.
    """
    # Simulation mode: create/refresh the output files and do nothing else.
    if touch_only:
        touch_output_files (cmd_str, output_files, logger)
        return "", ""

    # Local execution bypasses drmaa entirely.
    if run_locally:
        return run_job_locally (cmd_str, logger, job_environment, working_directory, local_echo)

    return run_job_using_drmaa (cmd_str, job_name, job_other_options, job_script_directory,
                                job_environment, working_directory, retain_job_scripts,
                                logger, drmaa_session, verbose)
| magosil86/ruffus | ruffus/drmaa_wrapper.py | Python | mit | 18,668 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AuthTestTidRequest(AbstractModel):
    """AuthTestTid request structure.

    """

    def __init__(self):
        r"""
        :param Data: Encrypted data string generated by the device-side SDK after the test TID parameter is filled in
        :type Data: str
        """
        self.Data = None


    def _deserialize(self, params):
        self.Data = params.get("Data")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class AuthTestTidResponse(AbstractModel):
    """AuthTestTid response structure.

    """

    def __init__(self):
        r"""
        :param Pass: Authentication result
        :type Pass: bool
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.Pass = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("Pass", "RequestId"):
            setattr(self, field, params.get(field))
class BurnTidNotifyRequest(AbstractModel):
    """BurnTidNotify request structure.

    """

    def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Tid: TID number
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None


    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Tid = params.get("Tid")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class BurnTidNotifyResponse(AbstractModel):
    """BurnTidNotify response structure.

    """

    def __init__(self):
        r"""
        :param Tid: TID whose receipt was successfully acknowledged
        :type Tid: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.Tid = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("Tid", "RequestId"):
            setattr(self, field, params.get(field))
class DeliverTidNotifyRequest(AbstractModel):
    """DeliverTidNotify request structure.

    """

    def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Tid: TID number
        :type Tid: str
        """
        self.OrderId = None
        self.Tid = None


    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Tid = params.get("Tid")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class DeliverTidNotifyResponse(AbstractModel):
    """DeliverTidNotify response structure.

    """

    def __init__(self):
        r"""
        :param RemaindCount: Remaining blank-delivery quota
        :type RemaindCount: int
        :param Tid: TID that has been acknowledged
        :type Tid: str
        :param ProductKey: Product public key
        :type ProductKey: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.RemaindCount = None
        self.Tid = None
        self.ProductKey = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("RemaindCount", "Tid", "ProductKey", "RequestId"):
            setattr(self, field, params.get(field))
class DeliverTidsRequest(AbstractModel):
    """DeliverTids request structure.

    """

    def __init__(self):
        r"""
        :param OrderId: Order ID
        :type OrderId: str
        :param Quantity: Quantity, 1-100
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None


    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Quantity = params.get("Quantity")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class DeliverTidsResponse(AbstractModel):
    """DeliverTids response structure.

    """

    def __init__(self):
        r"""
        :param TidSet: TID information delivered blank
        Note: this field may return null, indicating that no valid value was obtained.
        :type TidSet: list of TidKeysInfo
        :param ProductKey: Product public key
        :type ProductKey: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.TidSet = None
        self.ProductKey = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_tid_set = params.get("TidSet")
        if raw_tid_set is not None:
            # Rebuild each entry as a TidKeysInfo object.
            self.TidSet = []
            for entry in raw_tid_set:
                info = TidKeysInfo()
                info._deserialize(entry)
                self.TidSet.append(info)
        self.ProductKey = params.get("ProductKey")
        self.RequestId = params.get("RequestId")
class DescribeAvailableLibCountRequest(AbstractModel):
    """DescribeAvailableLibCount request structure.

    """

    def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        """
        self.OrderId = None


    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class DescribeAvailableLibCountResponse(AbstractModel):
    """DescribeAvailableLibCount response structure.

    """

    def __init__(self):
        r"""
        :param Quantity: Number of white-box keys available for blank delivery
        :type Quantity: int
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.Quantity = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("Quantity", "RequestId"):
            setattr(self, field, params.get(field))
class DescribePermissionRequest(AbstractModel):
    """DescribePermission request structure (takes no parameters).

    """
class DescribePermissionResponse(AbstractModel):
    """DescribePermission response structure.

    """

    def __init__(self):
        r"""
        :param EnterpriseUser: Whether the caller is an enterprise user
        :type EnterpriseUser: bool
        :param DownloadPermission: Console download permission
        :type DownloadPermission: str
        :param UsePermission: Console usage permission
        :type UsePermission: str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.EnterpriseUser = None
        self.DownloadPermission = None
        self.UsePermission = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("EnterpriseUser", "DownloadPermission", "UsePermission", "RequestId"):
            setattr(self, field, params.get(field))
class DownloadTidsRequest(AbstractModel):
    """DownloadTids request structure.

    """

    def __init__(self):
        r"""
        :param OrderId: Order number
        :type OrderId: str
        :param Quantity: Number to download: 1-10
        :type Quantity: int
        """
        self.OrderId = None
        self.Quantity = None


    def _deserialize(self, params):
        self.OrderId = params.get("OrderId")
        self.Quantity = params.get("Quantity")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class DownloadTidsResponse(AbstractModel):
    """DownloadTids response structure.

    """

    def __init__(self):
        r"""
        :param TidSet: List of downloaded TID information
        Note: this field may return null, indicating that no valid value was obtained.
        :type TidSet: list of TidKeysInfo
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.TidSet = None
        self.RequestId = None


    def _deserialize(self, params):
        raw_tid_set = params.get("TidSet")
        if raw_tid_set is not None:
            # Rebuild each entry as a TidKeysInfo object.
            self.TidSet = []
            for entry in raw_tid_set:
                info = TidKeysInfo()
                info._deserialize(entry)
                self.TidSet.append(info)
        self.RequestId = params.get("RequestId")
class TidKeysInfo(AbstractModel):
    """TID and key material generated by the system.

    """

    def __init__(self):
        r"""
        :param Tid: TID number
        :type Tid: str
        :param PublicKey: Public key
        :type PublicKey: str
        :param PrivateKey: Private key
        :type PrivateKey: str
        :param Psk: Pre-shared key
        :type Psk: str
        :param DownloadUrl: Download URL of the software-hardened white-box key
        :type DownloadUrl: str
        :param DeviceCode: Software-hardened device identifier code
        :type DeviceCode: str
        """
        self.Tid = None
        self.PublicKey = None
        self.PrivateKey = None
        self.Psk = None
        self.DownloadUrl = None
        self.DeviceCode = None


    def _deserialize(self, params):
        self.Tid = params.get("Tid")
        self.PublicKey = params.get("PublicKey")
        self.PrivateKey = params.get("PrivateKey")
        self.Psk = params.get("Psk")
        self.DownloadUrl = params.get("DownloadUrl")
        self.DeviceCode = params.get("DeviceCode")
        # Warn about keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class UploadDeviceUniqueCodeRequest(AbstractModel):
    """UploadDeviceUniqueCode request structure.

    """

    def __init__(self):
        r"""
        :param CodeSet: Hardware unique identifier codes
        :type CodeSet: list of str
        :param OrderId: Application (order) number the hardware identifier codes are bound to
        :type OrderId: str
        """
        self.CodeSet = None
        self.OrderId = None


    def _deserialize(self, params):
        self.CodeSet = params.get("CodeSet")
        self.OrderId = params.get("OrderId")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class UploadDeviceUniqueCodeResponse(AbstractModel):
    """UploadDeviceUniqueCode response structure.

    """

    def __init__(self):
        r"""
        :param Count: Number uploaded in this call
        :type Count: int
        :param ExistedCodeSet: Duplicated hardware unique identifier codes
        Note: this field may return null, indicating that no valid value was obtained.
        :type ExistedCodeSet: list of str
        :param LeftQuantity: Remaining upload quota
        :type LeftQuantity: int
        :param IllegalCodeSet: Invalid hardware unique identifier codes
        Note: this field may return null, indicating that no valid value was obtained.
        :type IllegalCodeSet: list of str
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.Count = None
        self.ExistedCodeSet = None
        self.LeftQuantity = None
        self.IllegalCodeSet = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("Count", "ExistedCodeSet", "LeftQuantity", "IllegalCodeSet", "RequestId"):
            setattr(self, field, params.get(field))
class VerifyChipBurnInfoRequest(AbstractModel):
    """VerifyChipBurnInfo request structure.

    """

    def __init__(self):
        r"""
        :param Data: Verification data
        :type Data: str
        """
        self.Data = None


    def _deserialize(self, params):
        self.Data = params.get("Data")
        # Warn about request keys that do not map to any declared attribute
        # (fixes the "memeber_set"/"fileds" typos in the generated original).
        unused_keys = set(params.keys()) - set(vars(self).keys())
        if len(unused_keys) > 0:
            warnings.warn("%s fields are useless." % ",".join(unused_keys))
class VerifyChipBurnInfoResponse(AbstractModel):
    """VerifyChipBurnInfo response structure.

    """

    def __init__(self):
        r"""
        :param Pass: Verification result
        :type Pass: bool
        :param VerifiedTimes: Number of times already verified
        :type VerifiedTimes: int
        :param LeftTimes: Remaining verification attempts
        :type LeftTimes: int
        :param RequestId: Unique request ID returned with every request; needed when reporting an issue to locate that request.
        :type RequestId: str
        """
        self.Pass = None
        self.VerifiedTimes = None
        self.LeftTimes = None
        self.RequestId = None


    def _deserialize(self, params):
        # Copy each known field straight out of the response dictionary.
        for field in ("Pass", "VerifiedTimes", "LeftTimes", "RequestId"):
            setattr(self, field, params.get(field))
from django.db import models
from olc_webportalv2.users.models import User
from django.contrib.postgres.fields.jsonb import JSONField
import os
from django.core.exceptions import ValidationError
# Create your models here.
def validate_fastq(fieldfile):
    """Django field validator: accept only FASTQ uploads (.fastq / .fastq.gz).

    :param fieldfile: uploaded FieldFile whose basename is checked
    :raises ValidationError: if the filename lacks a FASTQ extension
    """
    filename = os.path.basename(fieldfile.name)
    # str.endswith accepts a tuple of allowed suffixes
    if filename.endswith(('.fastq.gz', '.fastq')):
        print('File extension for {} confirmed valid'.format(filename))
    else:
        # BUGFIX: the original wrapped the message in the undefined gettext
        # alias "_" (NameError at raise time) and interpolated %(file)s even
        # though the params key is "filename".
        raise ValidationError(
            '%(filename)s does not end with .fastq or .fastq.gz',
            params={'filename': filename},
        )
class ProjectMulti(models.Model):
    """A user-owned project grouping the sequencing samples submitted together."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    project_title = models.CharField(max_length=256)
    description = models.CharField(max_length=200, blank=True)
    # Set once, when the project row is first created.
    date = models.DateTimeField(auto_now_add=True)
    # Filename tokens used to identify forward/reverse FASTQ reads.
    forward_id = models.CharField(max_length=256, default='_R1')
    reverse_id = models.CharField(max_length=256, default='_R2')
    def __str__(self):
        return self.project_title
class Sample(models.Model):
    """A sequencing sample (paired FASTQ reads and/or a FASTA file) in a project,
    with one free-text status field per analysis pipeline."""
    project = models.ForeignKey(ProjectMulti, on_delete=models.CASCADE, related_name='samples')
    # NOTE(review): upload_to looks like a strftime pattern, but '%s' is not a
    # strftime directive -- confirm the intended upload path format.
    file_R1 = models.FileField(upload_to='%Y%m%d%s', blank=True)
    file_R2 = models.FileField(upload_to='%Y%m%d%s', blank=True)
    file_fasta = models.FileField(upload_to='%Y%m%d%s', blank=True)
    title = models.CharField(max_length=200, blank=True)
    # Per-pipeline processing status; every sample starts as "Unprocessed".
    genesippr_status = models.CharField(max_length=128,
                                        default="Unprocessed")
    sendsketch_status = models.CharField(max_length=128,
                                         default="Unprocessed")
    confindr_status = models.CharField(max_length=128,
                                       default="Unprocessed")
    genomeqaml_status = models.CharField(max_length=128,
                                         default="Unprocessed")
    amr_status = models.CharField(max_length=128,
                                  default="Unprocessed")
    def __str__(self):
        return self.title
class GenomeQamlResult(models.Model):
    """GenomeQAML classification result for one sample; values kept as strings."""
    class Meta:
        verbose_name_plural = "GenomeQAML Results"
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genomeqaml_result')
    predicted_class = models.CharField(max_length=128, default='N/A')
    percent_fail = models.CharField(max_length=128, default='N/A')
    percent_pass = models.CharField(max_length=128, default='N/A')
    # NOTE(review): max_length=118 differs from the sibling fields (128) --
    # possibly a typo; changing it would require a migration, so confirm first.
    percent_reference = models.CharField(max_length=118, default='N/A')
    def __str__(self):
        return '{}'.format(self.sample)
class SendsketchResult(models.Model):
    """One ranked sendsketch hit for a sample; all values stored as raw strings."""
    class Meta:
        verbose_name_plural = "Sendsketch Results"
    def __str__(self):
        return 'pk {}: Rank {}: Sample {}'.format(self.pk, self.rank, self.sample.pk)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
    # Rank of this hit within the sample's sendsketch output.
    rank = models.CharField(max_length=8, default='N/A')
    # Remaining columns are copied verbatim from the sendsketch report row.
    wkid = models.CharField(max_length=256, default='N/A')
    kid = models.CharField(max_length=256, default='N/A')
    ani = models.CharField(max_length=256, default='N/A')
    complt = models.CharField(max_length=256, default='N/A')
    contam = models.CharField(max_length=256, default='N/A')
    matches = models.CharField(max_length=256, default='N/A')
    unique = models.CharField(max_length=256, default='N/A')
    nohit = models.CharField(max_length=256, default='N/A')
    taxid = models.CharField(max_length=256, default='N/A')
    gsize = models.CharField(max_length=256, default='N/A')
    gseqs = models.CharField(max_length=256, default='N/A')
    taxname = models.CharField(max_length=256, default='N/A')
class GenesipprResults(models.Model):
    """Genesippr marker-gene results for one sample.

    Marker fields hold percentage strings such as "87.50%"; the *_number
    helpers parse the numeric part (they raise ValueError on non-numeric
    values such as the "N/A" default, same as the original code).
    """
    # For admin panel
    def __str__(self):
        return '{}'.format(self.sample)

    # TODO: Accomodate seqID
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genesippr_results')

    # genesippr.csv
    strain = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")

    # STEC
    serotype = models.CharField(max_length=256, default="N/A")
    o26 = models.CharField(max_length=256, default="N/A")
    o45 = models.CharField(max_length=256, default="N/A")
    o103 = models.CharField(max_length=256, default="N/A")
    o111 = models.CharField(max_length=256, default="N/A")
    o121 = models.CharField(max_length=256, default="N/A")
    o145 = models.CharField(max_length=256, default="N/A")
    o157 = models.CharField(max_length=256, default="N/A")
    uida = models.CharField(max_length=256, default="N/A")
    eae = models.CharField(max_length=256, default="N/A")
    eae_1 = models.CharField(max_length=256, default="N/A")
    vt1 = models.CharField(max_length=256, default="N/A")
    vt2 = models.CharField(max_length=256, default="N/A")
    vt2f = models.CharField(max_length=256, default="N/A")

    # listeria
    igs = models.CharField(max_length=256, default="N/A")
    hlya = models.CharField(max_length=256, default="N/A")
    inlj = models.CharField(max_length=256, default="N/A")

    # salmonella
    inva = models.CharField(max_length=256, default="N/A")
    stn = models.CharField(max_length=256, default="N/A")

    @staticmethod
    def _percent_to_float(value):
        """Parse the numeric part of a percentage string such as '87.5%'."""
        return float(value.split('%')[0])

    # Numeric accessors, deduplicated via _percent_to_float (the original
    # repeated the identical split/float expression in every method).
    def inva_number(self):
        return self._percent_to_float(self.inva)

    def uida_number(self):
        return self._percent_to_float(self.uida)

    def vt1_number(self):
        return self._percent_to_float(self.vt1)

    def vt2_number(self):
        return self._percent_to_float(self.vt2)

    def vt2f_number(self):
        return self._percent_to_float(self.vt2f)

    def eae_number(self):
        return self._percent_to_float(self.eae)

    def eae_1_number(self):
        return self._percent_to_float(self.eae_1)

    def hlya_number(self):
        return self._percent_to_float(self.hlya)

    def igs_number(self):
        return self._percent_to_float(self.igs)

    def inlj_number(self):
        return self._percent_to_float(self.inlj)

    class Meta:
        verbose_name_plural = "Genesippr Results"
class GenesipprResultsSixteens(models.Model):
    """SixteenS (16S) identification result row for one sample."""
    class Meta:
        verbose_name_plural = "SixteenS Results"
    def __str__(self):
        return '{}'.format(self.sample)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='sixteens_results')
    # sixteens_full.csv
    strain = models.CharField(max_length=256, default="N/A")
    gene = models.CharField(max_length=256, default="N/A")
    percentidentity = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")
    foldcoverage = models.CharField(max_length=256, default="N/A")
    @property
    def gi_accession(self):
        # Split by | delimiter, pull second element which should be the GI#
        # NOTE(review): raises IndexError when gene has no '|' (e.g. the
        # "N/A" default) -- confirm callers only use this on populated rows.
        gi_accession = self.gene.split('|')[1]
        return gi_accession
class GenesipprResultsGDCS(models.Model):
    """GDCS (core-gene) QC result for one sample, parsed from GDCS.csv."""
    class Meta:
        verbose_name_plural = "GDCS Results"
    def __str__(self):
        return '{}'.format(self.sample)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='gdcs_results')
    # GDCS.csv
    strain = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")
    matches = models.CharField(max_length=256, default="N/A")
    meancoverage = models.CharField(max_length=128, default="N/A")
    passfail = models.CharField(max_length=16, default="N/A")
    # Per-allele details stored as JSON; structure is set by the pipeline.
    allele_dict = JSONField(blank=True, null=True, default=dict)
class ConFindrResults(models.Model):
    """ConFindr contamination-detection result for one sample."""
    class Meta:
        verbose_name_plural = 'Confindr Results'
    def __str__(self):
        return '{}'.format(self.sample)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='confindr_results')
    strain = models.CharField(max_length=256, default="N/A")
    genera_present = models.CharField(max_length=256, default="N/A")
    contam_snvs = models.CharField(max_length=256, default="N/A")
    contaminated = models.CharField(max_length=256, default="N/A")
class GenesipprResultsSerosippr(models.Model):
    """Serosippr result placeholder: currently only links back to a sample."""
    class Meta:
        verbose_name_plural = "Serosippr Results"
    def __str__(self):
        return '{}'.format(self.sample)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
class AMRResult(models.Model):
    """Antimicrobial-resistance result for one sample; details stored as JSON."""
    class Meta:
        verbose_name_plural = 'AMR Results'
    def __str__(self):
        return '{}'.format(self.sample)
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='amr_results')
    # Raw AMR findings as JSON; structure is set by the pipeline.
    results_dict = JSONField(blank=True, null=True, default=dict)
    species = models.CharField(max_length=88, default='N/A')
| forestdussault/olc_webportalv2 | olc_webportalv2/new_multisample/models.py | Python | mit | 8,667 |
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
from .models import Mineral
class MineralModelTests(TestCase):
    """Model-level tests for Mineral creation."""

    def test_new_mineral_created(self):
        """A freshly created mineral is retrievable from the database."""
        attributes = {
            'name': "Abelsonite",
            'image_filename': "240px-Abelsonite_-_Green_River_Formation%2C_Uintah_County%2C_Utah%2C_USA.jpg",
            'image_caption': "Abelsonite from the Green River Formation, Uintah County, Utah, US",
            'category': "Organic",
            'formula': "C<sub>31</sub>H<sub>32</sub>N<sub>4</sub>Ni",
            'strunz_classification': "10.CA.20",
            'crystal_system': "Triclinic",
            'unit_cell': "a = 8.508 Å, b = 11.185 Åc=7.299 Å, α = 90.85°β = 114.1°, γ = 79.99°Z = 1",
            'color': "Pink-purple, dark greyish purple, pale purplish red, reddish brown",
            'crystal_symmetry': "Space group: P1 or P1Point group: 1 or 1",
            'cleavage': "Probable on {111}",
            'mohs_scale_hardness': "2–3",
            'luster': "Adamantine, sub-metallic",
            'streak': "Pink",
            'diaphaneity': "Semitransparent",
            'optical_properties': "Biaxial",
            'group': "Organic Minerals",
        }
        mineral = Mineral.objects.create(**attributes)
        self.assertIn(mineral, Mineral.objects.all())
class MineralViewTests(TestCase):
    """View tests covering the list, detail and search endpoints."""

    def setUp(self):
        # One known mineral for every view to find.
        self.mineral = Mineral.objects.create(
            name="Abhurite",
            image_filename="240px-Abhurite_-_Shipwreck_Hydra%2C_South_coast_of_Norway.jpg",
            image_caption="Brownish tabular crystals of abhurite from Shipwreck \"Hydra\", South coast of Norway",
            category="Halide",
            formula="Sn<sub>21</sub>O<sub>6</sub>(OH)<sub>14</sub>Cl<sub>16</sub>",
            strunz_classification="03.DA.30",
            crystal_symmetry="Trigonal",
            group="Halides"
        )

    def test_minerals_view(self):
        response = self.client.get(reverse('minerals:mineral_list'))
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.mineral, response.context['minerals'])
        self.assertTemplateUsed(response, 'minerals/minerals.html')

    def test_minerals_detail_view(self):
        response = self.client.get(reverse('minerals:mineral_detail',
                                           kwargs={'pk': self.mineral.id}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.mineral, response.context['mineral'])
        self.assertTemplateUsed(response, 'minerals/mineral_detail.html')

    def test_search_by_letter(self):
        letter = "A".lower()
        response = self.client.get(reverse('minerals:search_by_letter',
                                           kwargs={'letter': letter}))
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.mineral, response.context['minerals'])
        self.assertTemplateUsed(response, 'minerals/minerals.html')

    def test_search_by_text(self):
        response = self.client.post("/minerals/search/text/", {'search': 'ab'})
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.mineral, response.context['minerals'])
        self.assertTemplateUsed(response, 'minerals/minerals.html')

    def test_search_by_group(self):
        group = "Halides"
        response = self.client.get(reverse('minerals:search_by_group',
                                           kwargs={'group': group}))
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.mineral, response.context['minerals'])
        self.assertTemplateUsed(response, 'minerals/minerals.html')
| squadran2003/filtering-searching-mineral-catalogue | filtering-searching-mineral-catalogue/minerals/tests.py | Python | mit | 3,110 |
import logging
from flask import g
from flask_login import current_user
from app.globals import get_answer_store, get_answers, get_metadata, get_questionnaire_store
from app.questionnaire.path_finder import PathFinder
from app.schema.block import Block
from app.schema.exceptions import QuestionnaireException
logger = logging.getLogger(__name__)
def get_questionnaire_manager(schema, schema_json):
    """Return the request-scoped QuestionnaireManager, creating and caching it on flask.g."""
    manager = g.get('_questionnaire_manager')
    if manager is None:
        manager = QuestionnaireManager(schema, schema_json)
        g._questionnaire_manager = manager
    return manager
class QuestionnaireManager(object):
    """
    This class represents a user journey through a survey. It models the request/response process of the web application
    """
    def __init__(self, schema, json=None):
        # Raw questionnaire JSON (used to build a PathFinder) and parsed schema.
        self._json = json
        self._schema = schema
        # Per-request state of the block being processed; (re)built by
        # build_block_state() before any validation takes place.
        self.block_state = None
    def validate(self, location, post_data, skip_mandatory_validation=False):
        """Build block state for *location* from *post_data* and validate it.

        Interstitial locations carry no answers, so they always validate.
        """
        self.build_block_state(location, post_data)
        return location.is_interstitial() or self.block_state.schema_item.validate(self.block_state, skip_mandatory_validation)
    def validate_all_answers(self):
        """Re-validate every location on the current user's path.

        Returns (True, None) when all locations pass, otherwise
        (False, first_failing_location).
        """
        navigator = PathFinder(self._json, get_answer_store(current_user), get_metadata(current_user))
        for location in navigator.get_location_path():
            answers = get_answers(current_user)
            is_valid = self.validate(location, answers)
            if not is_valid:
                logger.debug("Failed validation with current location %s", str(location))
                return False, location
        return True, None
    def update_questionnaire_store(self, location):
        """Persist the current block's answers and mark *location* completed."""
        questionnaire_store = self._add_update_answer_store(location)
        if location not in questionnaire_store.completed_blocks:
            questionnaire_store.completed_blocks.append(location)
    def update_questionnaire_store_save_sign_out(self, location):
        """Persist the current block's answers but un-mark *location* (save & sign-out flow)."""
        questionnaire_store = self._add_update_answer_store(location)
        if location in questionnaire_store.completed_blocks:
            questionnaire_store.completed_blocks.remove(location)
    def _add_update_answer_store(self, location):
        # Flatten every answer of the current block state into the user's store.
        questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
        for answer in self.get_state_answers(location.block_id):
            questionnaire_store.answer_store.add_or_update(answer.flatten())
        return questionnaire_store
    def process_incoming_answers(self, location, post_data):
        """Validate *post_data* for *location*; persist it only when valid."""
        logger.debug("Processing post data for %s", location)
        is_valid = self.validate(location, post_data)
        # run the validator to update the validation_store
        if is_valid:
            self.update_questionnaire_store(location)
        return is_valid
    def build_block_state(self, location, answers):
        """Construct self.block_state for *location* and populate it from *answers*."""
        # Build the state from the answers
        self.block_state = None
        if self._schema.item_exists(location.block_id):
            metadata = get_metadata(current_user)
            block = self._schema.get_item_by_id(location.block_id)
            if not isinstance(block, Block):
                raise QuestionnaireException
            self.block_state = block.construct_state()
            # Tag the fresh answer states with the group context before
            # applying the user's input.
            for answer in self.get_state_answers(location.block_id):
                answer.group_id = location.group_id
                answer.group_instance = location.group_instance
            self.block_state.update_state(answers)
            self.block_state.set_skipped(get_answers(current_user), metadata)
    def get_state_answers(self, item_id):
        """Return the current block's answer states, or [] for unknown ids."""
        # get the answers from the state
        if self._schema.item_exists(item_id):
            return self.block_state.get_answers()
        return []
    def get_schema_item_by_id(self, item_id):
        """Look up a schema item by id (thin delegation to the schema)."""
        return self._schema.get_item_by_id(item_id)
    def get_schema(self):
        """Return the parsed schema object."""
        return self._schema
    def add_answer(self, location, answer_store, question_id):
        """Append a new (repeating) answer instance to every answer of *question_id* and persist."""
        question_schema = self._schema.get_item_by_id(question_id)
        question_state = self.block_state.find_state_item(question_schema)
        for answer_schema in question_schema.answers:
            next_answer_instance_id = self._get_next_answer_instance(answer_store, answer_schema.id)
            new_answer_state = question_state.create_new_answer_state(answer_schema, next_answer_instance_id)
            question_state.add_new_answer_state(new_answer_state)
        self.update_questionnaire_store(location)
    @staticmethod
    def _get_next_answer_instance(answer_store, answer_id):
        # Next instance id = last stored instance + 1, or 0 when none exist yet.
        existing_answers = answer_store.filter(answer_id=answer_id)
        last_answer = existing_answers[-1:]
        next_answer_instance_id = 0 if len(last_answer) == 0 else int(last_answer[0]['answer_instance']) + 1
        return next_answer_instance_id
    def remove_answer(self, location, answer_store, index_to_remove):
        """Remove the answer instance *index_to_remove* from both state and store, then persist."""
        state_answers = self.block_state.get_answers()
        for state_answer in state_answers:
            if state_answer.answer_instance == index_to_remove:
                question = state_answer.parent
                question.remove_answer(state_answer)
                answer_store.remove_answer(state_answer.flatten())
        self.update_questionnaire_store(location)
| qateam123/eq | app/questionnaire/questionnaire_manager.py | Python | mit | 5,368 |
# -*- coding: utf-8 -*-
"""The top-level package for ``django-mysqlpool``."""
# These imports make 2 act like 3, making it easier on us to switch to PyPy or
# some other VM if we need to for performance reasons.
from __future__ import (absolute_import, print_function, unicode_literals,
division)
# Make ``Foo()`` work the same in Python 2 as it does in Python 3.
__metaclass__ = type
import os
from django.conf import settings
from django.db.backends.mysql import base
from django.core.exceptions import ImproperlyConfigured
try:
import sqlalchemy.pool as pool
except ImportError as e:
raise ImproperlyConfigured("Error loading SQLAlchemy module: %s" % e)
# Global variable to hold the actual connection pool; created lazily by
# get_pool(), one pool per process.
MYSQLPOOL = None
# Default pool type (QueuePool, SingletonThreadPool, AssertionPool, NullPool,
# StaticPool). Overridable via settings.MYSQLPOOL_BACKEND.
DEFAULT_BACKEND = 'QueuePool'
# Needs to be less than MySQL connection timeout (server setting). The default
# is 120, so default to 119.
DEFAULT_POOL_TIMEOUT = 119
def isiterable(value):
    """Report whether ``value`` supports iteration.

    Uses the EAFP approach — ``iter()`` raises TypeError for non-iterables —
    so objects that only implement ``__getitem__`` still count as iterable.
    """
    try:
        iter(value)
    except TypeError:
        return False
    return True
class OldDatabaseProxy():
    """Forwarding shim around the pre-patch ``connect`` function.

    SQLAlchemy's pool manager expects an object exposing a ``connect()``
    attribute (like a DB-API module); this proxy keeps a reference to the
    original function and simply forwards every call to it.
    """
    def __init__(self, old_connect):
        """Remember ``old_connect`` for later delegation."""
        self.old_connect = old_connect
    def connect(self, **kwargs):
        """Forward ``kwargs`` to the saved ``connect`` and return its result."""
        saved_connect = self.old_connect
        return saved_connect(**kwargs)
class HashableDict(dict):
    """A dictionary that can be hashed.

    Created specifically so the ``conv`` / ``ssl`` dicts passed to MySQLdb
    can participate in SQLAlchemy's hashed connection-parameter grouping.
    """
    def __hash__(self):
        """Hash the dict as a tuple of ``(key, tuple(value))`` pairs.

        Only entries whose value is itself iterable contribute to the hash;
        non-iterable values are skipped, matching the shape of the ``conv``
        mapping this class was built for.
        """
        hashable_items = tuple(
            (key, tuple(value))
            for key, value in self.items()
            if isiterable(value)
        )
        return hash(hashable_items)
# Define this here so Django can import it: the database-backend contract
# requires the module to expose a ``DatabaseWrapper`` class.
DatabaseWrapper = base.DatabaseWrapper
# Wrap the old connect() function so our pool can call it; the proxy is what
# gets handed to SQLAlchemy's pool.manage() in get_pool().
OldDatabase = OldDatabaseProxy(base.Database.connect)
def get_pool():
    """Create one and only one pool using the configured settings.

    The pool is cached in the module-level ``MYSQLPOOL`` and tagged with the
    pid that created it. If the cached pool was inherited across a fork
    (pid mismatch), the stale managers are discarded and a fresh pool is
    built, because pooled MySQL connections must not be shared between
    processes.
    """
    global MYSQLPOOL
    if MYSQLPOOL is not None and getattr(MYSQLPOOL, '_pid', None) != os.getpid():
        # Bug fix: the previous code cleared the managers here but kept
        # returning the stale, forked pool object (and never refreshed _pid,
        # so it re-cleared on every call). Rebuild the pool instead.
        pool.clear_managers()
        MYSQLPOOL = None
    if MYSQLPOOL is None:
        backend_name = getattr(settings, 'MYSQLPOOL_BACKEND', DEFAULT_BACKEND)
        backend = getattr(pool, backend_name)
        kwargs = getattr(settings, 'MYSQLPOOL_ARGUMENTS', {})
        kwargs.setdefault('poolclass', backend)
        kwargs.setdefault('recycle', DEFAULT_POOL_TIMEOUT)
        MYSQLPOOL = pool.manage(OldDatabase, **kwargs)
        MYSQLPOOL._pid = os.getpid()
    return MYSQLPOOL
def connect(**kwargs):
    """Obtain a database connection from the connection pool."""
    # SQLAlchemy serializes the parameters to keep unique connection
    # parameter groups in their own pool, so the 'conv' and 'ssl' dicts must
    # be stored in a hashable form. Falsy values are dropped entirely.
    for key in ('conv', 'ssl'):
        value = kwargs.pop(key, None)
        if value:
            kwargs[key] = HashableDict(value)
    # Open the connection via the pool.
    return get_pool().connect(**kwargs)
# Monkey-patch the regular mysql backend to use our hacked-up connect()
# function, so every Django MySQL connection is served from the pool
# (OldDatabaseProxy above still holds the original function).
base.Database.connect = connect
| smartfile/django-mysqlpool | django_mysqlpool/backends/mysqlpool/base.py | Python | mit | 3,693 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Converts A bunch of mac Screenshots to 24hour format
def to_24(string):
    """Convert a mac screenshot time like ``"1.05.20 PM"`` to 24-hour ``"13.05.20"``.

    Input format: ``H.MM.SS AM|PM`` (dot-separated, space before the period
    marker). Each field is zero-padded to two digits in the result.
    """
    hour, minute, trail = string.split('.')
    sec, period = trail.split(' ')
    # Bug fix: "12 AM" is midnight (00), not 24. ``hour % 12`` maps 12 -> 0
    # for both periods, then PM adds 12 (so 12 PM correctly stays 12).
    hour = int(hour) % 12
    if period.lower() == "pm":
        hour += 12
    return "%s.%s.%s" % (
        str(hour).zfill(2),
        str(int(minute)).zfill(2),
        str(int(sec)).zfill(2),
    )
if __name__ == '__main__':
    from folder_list import FolderList

    root = FolderList("/Users/Saevon/Pictures/Screenshots/Witch's House/")
    for file in root:
        # Skip files that have already been renamed: a converted name no
        # longer contains an AM/PM marker. (Bug fix: the original used
        # ``or``, which skipped every file, since no name contains BOTH
        # markers at once.)
        if "PM" not in file.name and "AM" not in file.name:
            continue
        # Convert the trailing 12-hour time to the 24-hour format
        prefix, time = file.name.split(' at ')
        suffix = to_24(time)
        new_name = '%s at %s' % (prefix, suffix)
        file.rename(new_name)
| Saevon/Recipes | oneshots/screenshot.py | Python | mit | 1,143 |
# (c) 2009-2015 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
WSGI container, that handles the HTTP requests. This object is passed to the
WSGI server and represents our WsgiDAV application to the outside.
On init:
Use the configuration dictionary to initialize lock manager, property manager,
domain controller.
Create a dictionary of share-to-provider mappings.
Initialize middleware objects and RequestResolver and setup the WSGI
application stack.
For every request:
Find the registered DAV provider for the current request.
Add or modify info in the WSGI ``environ``:
environ["SCRIPT_NAME"]
Mount-point of the current share.
environ["PATH_INFO"]
Resource path, relative to the mount path.
environ["wsgidav.provider"]
DAVProvider object that is registered for handling the current
request.
environ["wsgidav.config"]
Configuration dictionary.
environ["wsgidav.verbose"]
Debug level [0-3].
Log the HTTP request, then pass the request to the first middleware.
Note: The OPTIONS method for the '*' path is handled directly.
See `Developers info`_ for more information about the WsgiDAV architecture.
.. _`Developers info`: http://wsgidav.readthedocs.org/en/latest/develop.html
"""
from fs_dav_provider import FilesystemProvider
from wsgidav.dir_browser import WsgiDavDirBrowser
from wsgidav.dav_provider import DAVProvider
from wsgidav.addons.tracim.lock_storage import LockStorage
import time
import sys
import threading
import urllib
import util
from error_printer import ErrorPrinter
from debug_filter import WsgiDavDebugFilter
from http_authenticator import HTTPAuthenticator
from request_resolver import RequestResolver
from property_manager import PropertyManager
from lock_manager import LockManager
#from wsgidav.version import __version__
__docformat__ = "reStructuredText"
# Use these settings, if config file does not define them (or is totally missing)
DEFAULT_CONFIG = {
    "mount_path": None, # Application root, e.g. <mount_path>/<share_name>/<res_path>
    "provider_mapping": {},
    "host": "localhost",
    "port": 8080,
    "ext_servers": [
        # "paste",
        # "cherrypy",
        # "wsgiref",
        "cherrypy-bundled",
        "wsgidav",
    ],
    "add_header_MS_Author_Via": True,
    "unquote_path_info": False, # See #8
    # "use_text_files": False,
    "propsmanager": None, # True: use property_manager.PropertyManager
    "locksmanager": True, # True: use lock_manager.LockManager
    # HTTP Authentication Options
    "user_mapping": {}, # dictionary of dictionaries
    "domaincontroller": None, # None: domain_controller.WsgiDAVDomainController(user_mapping)
    "acceptbasic": True, # Allow basic authentication, True or False
    "acceptdigest": True, # Allow digest authentication, True or False
    "defaultdigest": True, # True (default digest) or False (default basic)
    # Error printer options
    "catchall": False,
    "enable_loggers": [
    ],
    # Verbose Output
    "verbose": 1, # 0 - no output (excepting application exceptions)
    # 1 - show single line request summaries (for HTTP logging)
    # 2 - show additional events
    # 3 - show full request/response header info (HTTP Logging)
    # request body and GET response bodies not shown
    "dir_browser": {
        "enable": True, # Render HTML listing for GET requests on collections
        "response_trailer": "", # Raw HTML code, appended as footer
        "davmount": False, # Send <dm:mount> response if request URL contains '?davmount'
        "ms_mount": False, # Add an 'open as webfolder' link (requires Windows)
        "ms_sharepoint_plugin": True, # Invoke MS Offce documents for editing using WebDAV
        "ms_sharepoint_urls": False, # Prepend 'ms-word:ofe|u|' to URL for MS Offce documents
    },
    # Applied by WsgiDAVApp.__init__ in list order: each entry wraps the app
    # built so far, so the LAST entry ends up as the outermost middleware.
    "middleware_stack": [
        WsgiDavDirBrowser,
        HTTPAuthenticator,
        ErrorPrinter,
        WsgiDavDebugFilter,
    ]
}
def _checkConfig(config):
mandatoryFields = ["provider_mapping",
]
for field in mandatoryFields:
if not field in config:
raise ValueError("Invalid configuration: missing required field '%s'" % field)
#===============================================================================
# WsgiDAVApp
#===============================================================================
class WsgiDAVApp(object):
    """Top-level WSGI application.

    Maps each configured share path to a DAV provider, builds the WSGI
    middleware stack once at init time, and dispatches every request
    through that stack (see module docstring for the per-request environ
    keys it sets).
    """
    def __init__(self, config):
        """Evaluate *config*, create lock/prop managers and providers, build the middleware stack."""
        self.config = config
        util.initLogging(config["verbose"], config.get("enable_loggers", []))
        util.log("Default encoding: %s (file system: %s)" % (sys.getdefaultencoding(), sys.getfilesystemencoding()))
        # Evaluate configuration and set defaults
        _checkConfig(config)
        provider_mapping = self.config["provider_mapping"]
        # response_trailer = config.get("response_trailer", "")
        self._verbose = config.get("verbose", 2)
        # "locksmanager" may be True (use default storage), falsy (disabled),
        # or a ready-made storage object.
        lockStorage = config.get("locksmanager")
        if lockStorage is True:
            lockStorage = LockStorage()
        if not lockStorage:
            locksManager = None
        else:
            locksManager = LockManager(lockStorage)
        propsManager = config.get("propsmanager")
        if not propsManager:
            # Normalize False, 0 to None
            propsManager = None
        elif propsManager is True:
            propsManager = PropertyManager()
        mount_path = config.get("mount_path")
        # Instantiate DAV resource provider objects for every share
        self.providerMap = {}
        for (share, provider) in provider_mapping.items():
            # Make sure share starts with, or is, '/'
            share = "/" + share.strip("/")
            # We allow a simple string as 'provider'. In this case we interpret
            # it as a file system root folder that is published.
            if isinstance(provider, basestring):
                provider = FilesystemProvider(provider)
            assert isinstance(provider, DAVProvider)
            provider.setSharePath(share)
            if mount_path:
                provider.setMountPath(mount_path)
            # TODO: someday we may want to configure different lock/prop managers per provider
            provider.setLockManager(locksManager)
            provider.setPropManager(propsManager)
            self.providerMap[share] = {"provider": provider, "allow_anonymous": False}
        # Define WSGI application stack
        application = RequestResolver()
        domain_controller = None
        dir_browser = config.get("dir_browser", {})
        middleware_stack = config.get("middleware_stack", [])
        # Replace WsgiDavDirBrowser to custom class for backward compatibility only
        # In normal way you should insert it into middleware_stack
        if dir_browser.get("enable", True) and "app_class" in dir_browser.keys():
            config["middleware_stack"] = [m if m != WsgiDavDirBrowser else dir_browser['app_class'] for m in middleware_stack]
        # Each middleware wraps the app built so far, so the last entry in
        # middleware_stack becomes the outermost layer.
        for mw in middleware_stack:
            if mw.isSuitable(config):
                if self._verbose >= 2:
                    print "Middleware %s is suitable" % mw
                application = mw(application, config)
                if issubclass(mw, HTTPAuthenticator):
                    domain_controller = application.getDomainController()
                    # check anonymous access
                    for share, data in self.providerMap.items():
                        if application.allowAnonymousAccess(share):
                            data['allow_anonymous'] = True
            else:
                if self._verbose >= 2:
                    print "Middleware %s is not suitable" % mw
        # Print info
        if self._verbose >= 2:
            print "Using lock manager: %r" % locksManager
            print "Using property manager: %r" % propsManager
            print "Using domain controller: %s" % domain_controller
            print "Registered DAV providers:"
            for share, data in self.providerMap.items():
                hint = " (anonymous)" if data['allow_anonymous'] else ""
                # NOTE(review): this prints the loop variable `provider` left
                # over from the setup loop above (i.e. the LAST provider) for
                # every share, not data['provider'] -- looks unintended.
                print " Share '%s': %s%s" % (share, provider, hint)
        if self._verbose >= 1:
            for share, data in self.providerMap.items():
                if data['allow_anonymous']:
                    # TODO: we should only warn here, if --no-auth is not given
                    print "WARNING: share '%s' will allow anonymous access." % share
        self._application = application
    def __call__(self, environ, start_response):
        """Resolve the share for PATH_INFO, patch ``environ``, and delegate to the stack (generator)."""
        # util.log("SCRIPT_NAME='%s', PATH_INFO='%s'" % (environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))
        # We optionall unquote PATH_INFO here, although this should already be
        # done by the server (#8).
        path = environ["PATH_INFO"]
        if self.config.get("unquote_path_info", False):
            path = urllib.unquote(environ["PATH_INFO"])
        # GC issue 22: Pylons sends root as u'/'
        if isinstance(path, unicode):
            util.log("Got unicode PATH_INFO: %r" % path)
            path = path.encode("utf8")
        # Always adding these values to environ:
        environ["wsgidav.config"] = self.config
        environ["wsgidav.provider"] = None
        environ["wsgidav.verbose"] = self._verbose
        ## Find DAV provider that matches the share
        # sorting share list by reverse length
        shareList = self.providerMap.keys()
        shareList.sort(key=len, reverse=True)
        share = None
        for r in shareList:
            # @@: Case sensitivity should be an option of some sort here;
            # os.path.normpath might give the preferred case for a filename.
            if r == "/":
                share = r
                break
            elif path.upper() == r.upper() or path.upper().startswith(r.upper()+"/"):
                share = r
                break
        share_data = self.providerMap.get(share)
        # Note: we call the next app, even if provider is None, because OPTIONS
        # must still be handled.
        # All other requests will result in '404 Not Found'
        # NOTE(review): if NO share matched (no "/" share configured),
        # share_data is None and the next line raises TypeError -- confirm a
        # root share is always expected.
        environ["wsgidav.provider"] = share_data['provider']
        # TODO: test with multi-level realms: 'aa/bb'
        # TODO: test security: url contains '..'
        # Transform SCRIPT_NAME and PATH_INFO
        # (Since path and share are unquoted, this also fixes quoted values.)
        if share == "/" or not share:
            environ["PATH_INFO"] = path
        else:
            environ["SCRIPT_NAME"] += share
            environ["PATH_INFO"] = path[len(share):]
        # util.log("--> SCRIPT_NAME='%s', PATH_INFO='%s'" % (environ.get("SCRIPT_NAME"), environ.get("PATH_INFO")))
        assert isinstance(path, str)
        # See http://mail.python.org/pipermail/web-sig/2007-January/002475.html
        # for some clarification about SCRIPT_NAME/PATH_INFO format
        # SCRIPT_NAME starts with '/' or is empty
        assert environ["SCRIPT_NAME"] == "" or environ["SCRIPT_NAME"].startswith("/")
        # SCRIPT_NAME must not have a trailing '/'
        assert environ["SCRIPT_NAME"] in ("", "/") or not environ["SCRIPT_NAME"].endswith("/")
        # PATH_INFO starts with '/'
        assert environ["PATH_INFO"] == "" or environ["PATH_INFO"].startswith("/")
        start_time = time.time()
        def _start_response_wrapper(status, response_headers, exc_info=None):
            """Wrap start_response: post-process headers, decide keep-alive, log the request."""
            # Postprocess response headers
            headerDict = {}
            for header, value in response_headers:
                if header.lower() in headerDict:
                    util.warn("Duplicate header in response: %s" % header)
                headerDict[header.lower()] = value
            # Check if we should close the connection after this request.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4
            forceCloseConnection = False
            currentContentLength = headerDict.get("content-length")
            statusCode = int(status.split(" ", 1)[0])
            contentLengthRequired = (environ["REQUEST_METHOD"] != "HEAD"
                                     and statusCode >= 200
                                     and not statusCode in (204, 304))
            # print environ["REQUEST_METHOD"], statusCode, contentLengthRequired
            if contentLengthRequired and currentContentLength in (None, ""):
                # A typical case: a GET request on a virtual resource, for which
                # the provider doesn't know the length
                util.warn("Missing required Content-Length header in %s-response: closing connection" % statusCode)
                forceCloseConnection = True
            elif not type(currentContentLength) is str:
                # NOTE(review): this branch also fires when the header is
                # simply absent on responses that don't require it (HEAD,
                # 204, 304), since type(None) is not str -- confirm intended.
                util.warn("Invalid Content-Length header in response (%r): closing connection" % headerDict.get("content-length"))
                forceCloseConnection = True
            # HOTFIX for Vista and Windows 7 (GC issue 13, issue 23)
            # It seems that we must read *all* of the request body, otherwise
            # clients may miss the response.
            # For example Vista MiniRedir didn't understand a 401 response,
            # when trying an anonymous PUT of big files. As a consequence, it
            # doesn't retry with credentials and the file copy fails.
            # (XP is fine however).
            util.readAndDiscardInput(environ)
            # Make sure the socket is not reused, unless we are 100% sure all
            # current input was consumed
            if(util.getContentLength(environ) != 0
               and not environ.get("wsgidav.all_input_read")):
                util.warn("Input stream not completely consumed: closing connection")
                forceCloseConnection = True
            if forceCloseConnection and headerDict.get("connection") != "close":
                util.warn("Adding 'Connection: close' header")
                response_headers.append(("Connection", "close"))
            # Log request
            if self._verbose >= 1:
                userInfo = environ.get("http_authenticator.username")
                if not userInfo:
                    userInfo = "(anonymous)"
                threadInfo = ""
                if self._verbose >= 1:
                    threadInfo = "<%s> " % threading._get_ident()
                extra = []
                if "HTTP_DESTINATION" in environ:
                    extra.append('dest="%s"' % environ.get("HTTP_DESTINATION"))
                if environ.get("CONTENT_LENGTH", "") != "":
                    extra.append("length=%s" % environ.get("CONTENT_LENGTH"))
                if "HTTP_DEPTH" in environ:
                    extra.append("depth=%s" % environ.get("HTTP_DEPTH"))
                if "HTTP_RANGE" in environ:
                    extra.append("range=%s" % environ.get("HTTP_RANGE"))
                if "HTTP_OVERWRITE" in environ:
                    extra.append("overwrite=%s" % environ.get("HTTP_OVERWRITE"))
                if self._verbose >= 1 and "HTTP_EXPECT" in environ:
                    extra.append('expect="%s"' % environ.get("HTTP_EXPECT"))
                if self._verbose >= 2 and "HTTP_CONNECTION" in environ:
                    extra.append('connection="%s"' % environ.get("HTTP_CONNECTION"))
                if self._verbose >= 2 and "HTTP_USER_AGENT" in environ:
                    extra.append('agent="%s"' % environ.get("HTTP_USER_AGENT"))
                if self._verbose >= 2 and "HTTP_TRANSFER_ENCODING" in environ:
                    extra.append('transfer-enc=%s' % environ.get("HTTP_TRANSFER_ENCODING"))
                if self._verbose >= 1:
                    extra.append('elap=%.3fsec' % (time.time() - start_time))
                extra = ", ".join(extra)
                # This is the CherryPy format:
                # 127.0.0.1 - - [08/Jul/2009:17:25:23] "GET /loginPrompt?redirect=/renderActionList%3Frelation%3Dpersonal%26key%3D%26filter%3DprivateSchedule&reason=0 HTTP/1.1" 200 1944 "http://127.0.0.1:8002/command?id=CMD_Schedule" "Mozilla/5.0 (Windows; U; Windows NT 6.0; de; rv:1.9.1) Gecko/20090624 Firefox/3.5"
                # print >>sys.stderr, '%s - %s - [%s] "%s" %s -> %s' % (
                print >>sys.stdout, '%s - %s - [%s] "%s" %s -> %s' % (
                    threadInfo + environ.get("REMOTE_ADDR",""),
                    userInfo,
                    util.getLogTime(),
                    environ.get("REQUEST_METHOD") + " " + environ.get("PATH_INFO", ""),
                    extra,
                    status,
                    # response_headers.get(""), # response Content-Length
                    # referer
                    )
            return start_response(status, response_headers, exc_info)
        # Call next middleware
        app_iter = self._application(environ, _start_response_wrapper)
        for v in app_iter:
            yield v
        if hasattr(app_iter, "close"):
            app_iter.close()
        return
| tracim/tracim-webdav | wsgidav/wsgidav_app.py | Python | mit | 18,592 |
#!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
#
# Copyright (c) 2017 by Tsuyoshi Hamada. All rights reserved.
#
import os
import logging as LG
import random
import commands
import shelve
import pickle
import sys
import hashlib
import re as REGEXP
# -- set encode for your terminal (used to encode log/print output) --
config_term_encode = 'euc-jp'
# -- set filename for your database; the embedded spaces are stripped by
# cut_space_str() in __main__, giving an effective prefix of /tmp/gitcommit- --
config_db_filename = '/t m p/g i t commit- '
def get_logger(str_position = ''):
    """Return a DEBUG-level console logger named after this file.

    The function tracks its own call count via a function attribute (not
    hasattr(), which would mask errors on Python 2/3 differences); on every
    call after the first, *str_position* is appended to the logger name so
    log lines show where the logger was requested.
    """
    count = getattr(get_logger, "__count_called", None)
    if count is None:
        # First call: plain file name, start the counter.
        get_logger.__count_called = 1
        log_basename = __file__
    else:
        # Subsequent calls: annotate the name with the caller's position.
        get_logger.__count_called = count + 1
        log_basename = "%s @%s" % (__file__, str_position)
    # create logger with console handler at DEBUG level
    logger = LG.getLogger(os.path.basename(log_basename))
    logger.setLevel(LG.DEBUG)
    handler = LG.StreamHandler()
    handler.setLevel(LG.DEBUG)
    handler.setFormatter(LG.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(handler)
    return logger
def get_quotes():
    """Return the lines of Shuntaro Tanikawa's poem "生きる" ("To Live"), one per list item."""
    return [
        u"生きる -- 谷川俊太郎",
        # -- stanza 1 --
        u"生きているということ",
        u"いま生きているということ",
        u"それはのどがかわくということ",
        u"木漏れ日がまぶしいということ",
        u"ふっと或るメロディを思い出すということ",
        u"くしゃみをすること",
        u"あなたと手をつなぐこと",
        # -- stanza 2 --
        u"生きているということ",
        u"いま生きているということ",
        u"それはミニスカート",
        u"それはプラネタリウム",
        u"それはヨハン・シュトラウス",
        u"それはピカソ",
        u"それはアルプス",
        u"すべての美しいものに出会うということ",
        u"そして",
        u"かくされた悪を注意深くこばむこと",
        # -- stanza 3 --
        u"生きているということ",
        u"いま生きているということ",
        u"泣けるということ",
        u"笑えるということ",
        u"怒れるということ",
        u"自由ということ",
        # -- stanza 4 --
        u"生きているということ",
        u"いま生きているということ",
        u"いま遠くで犬が吠えるということ",
        u"いま地球が廻っているということ",
        u"いまどこかで産声があがるということ",
        u"いまどこかで兵士が傷つくということ",
        u"いまぶらんこがゆれているということ",
        u"いまいまがすぎてゆくこと",
        # -- stanza 5 --
        u"生きているということ",
        u"いま生きてるということ",
        u"鳥ははばたくということ",
        u"海はとどろくということ",
        u"かたつむりははうということ",
        u"人は愛するということ",
        u"あなたの手のぬくみ",
        u"いのちということ",
        u":-) ;-)",
    ]
def get_shelve(fname, logger=None):
    """Return the current 'count' stored in shelve db *fname* and increment it.

    The counter starts at 0 for a fresh database. On any failure to open the
    database the error is logged and the process exits with status -1.
    """
    if logger is None:
        logger = get_logger('get_shelve()')
    try:
        db = shelve.open(fname, protocol=pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        logger.error(e)
        logger.error(fname)
        sys.exit(-1)
    count = db.get('count', 0)
    db['count'] = count + 1
    db.close()
    return count
def do_uncompress(filename, logger=None):
    """Expand <filename>.db.bz2 back into <filename>.db via the bzip2 CLI; always returns True."""
    if logger is None:
        logger = get_logger('do_uncompress()')
    output = commands.getoutput("hostname;time bzip2 -d %s.db.bz2" % filename)
    # logger.debug("%s", output)
    return True
def do_compress(filename, logger=None):
    """Compress <filename>.db into <filename>.db.bz2 via the bzip2 CLI; always returns True."""
    if logger is None:
        logger = get_logger('do_compress()')
    output = commands.getoutput("hostname;time bzip2 -9 %s.db" % filename)
    # logger.debug("%s", output)
    return True
def get_id_git(logger=None):
    """Return the md5 hex digest of `git remote -v` output, identifying this repository."""
    if logger is None:
        logger = get_logger('get_id_git()')
    remotes = commands.getoutput("git remote -v")
    digest = hashlib.md5()
    digest.update(remotes)
    return digest.hexdigest()
def cut_space_str(text):
    """Return *text* with every run of ASCII spaces removed.

    (The parameter was renamed from ``str``, which shadowed the builtin; the
    only call site in this file passes it positionally.)
    """
    return REGEXP.sub(r' +', '', text)
if __name__ == "__main__":
msg = ''
logger = get_logger()
md5sum = get_id_git()
db_filename = cut_space_str(config_db_filename + md5sum)
do_uncompress(db_filename)
count = get_shelve(db_filename)
do_compress(db_filename)
qs = get_quotes()
msg = ("%d: %s" % (count+1, qs[count % len(qs)]))
logger.info('# %s', db_filename.encode(config_term_encode))
logger.info('# %s', msg.encode(config_term_encode))
cmd = 'git commit -m "' + msg + '"; git push origin master;'
print cmd.encode(config_term_encode)
| thamada/tool-private | commi.py | Python | mit | 5,969 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .operations.application_gateways_operations import ApplicationGatewaysOperations
from .operations.available_endpoint_services_operations import AvailableEndpointServicesOperations
from .operations.express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .operations.express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .operations.express_route_circuits_operations import ExpressRouteCircuitsOperations
from .operations.express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .operations.load_balancers_operations import LoadBalancersOperations
from .operations.load_balancer_backend_address_pools_operations import LoadBalancerBackendAddressPoolsOperations
from .operations.load_balancer_frontend_ip_configurations_operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations.inbound_nat_rules_operations import InboundNatRulesOperations
from .operations.load_balancer_load_balancing_rules_operations import LoadBalancerLoadBalancingRulesOperations
from .operations.load_balancer_network_interfaces_operations import LoadBalancerNetworkInterfacesOperations
from .operations.load_balancer_probes_operations import LoadBalancerProbesOperations
from .operations.network_interfaces_operations import NetworkInterfacesOperations
from .operations.network_interface_ip_configurations_operations import NetworkInterfaceIPConfigurationsOperations
from .operations.network_interface_load_balancers_operations import NetworkInterfaceLoadBalancersOperations
from .operations.network_security_groups_operations import NetworkSecurityGroupsOperations
from .operations.security_rules_operations import SecurityRulesOperations
from .operations.default_security_rules_operations import DefaultSecurityRulesOperations
from .operations.network_watchers_operations import NetworkWatchersOperations
from .operations.packet_captures_operations import PacketCapturesOperations
from .operations.public_ip_addresses_operations import PublicIPAddressesOperations
from .operations.route_filters_operations import RouteFiltersOperations
from .operations.route_filter_rules_operations import RouteFilterRulesOperations
from .operations.route_tables_operations import RouteTablesOperations
from .operations.routes_operations import RoutesOperations
from .operations.bgp_service_communities_operations import BgpServiceCommunitiesOperations
from .operations.usages_operations import UsagesOperations
from .operations.virtual_networks_operations import VirtualNetworksOperations
from .operations.subnets_operations import SubnetsOperations
from .operations.virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from .operations.virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .operations.virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .operations.local_network_gateways_operations import LocalNetworkGatewaysOperations
from . import models
class NetworkManagementClientConfiguration(AzureConfiguration):
    """Configuration for NetworkManagementClient.

    Every argument used to create this instance is kept as an instance
    attribute.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The subscription credentials which uniquely
     identify the Microsoft Azure subscription. The subscription ID forms part
     of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        # Fail fast on the two mandatory arguments.
        for arg_name, arg_value in (('credentials', credentials),
                                    ('subscription_id', subscription_id)):
            if arg_value is None:
                raise ValueError(
                    "Parameter '{0}' must not be None.".format(arg_name))

        # Default to the public Azure Resource Manager endpoint.
        super(NetworkManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('azure-mgmt-network/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class NetworkManagementClient(object):
    """Network Client.

    Facade over the 2017-06-01 Azure Network REST API.  Each REST resource
    family is exposed as one operations-group attribute (for example
    ``application_gateways``, ``load_balancers``, ``network_watchers``,
    ``virtual_networks``, ``subnets``, ``routes``); the full list is the
    ``operation_groups`` table inside ``__init__``.

    :ivar config: Configuration for client.
    :vartype config: NetworkManagementClientConfiguration
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The subscription credentials which uniquely
     identify the Microsoft Azure subscription. The subscription ID forms part
     of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        self.config = NetworkManagementClientConfiguration(
            credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)

        # Both serializers share the complete model catalogue of this API
        # version (every class defined in the models package).
        client_models = {name: model for name, model in models.__dict__.items()
                         if isinstance(model, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # attribute name -> operations class; every group is constructed with
        # the same (client, config, serializer, deserializer) quadruple, in
        # this fixed order.
        operation_groups = [
            ('application_gateways', ApplicationGatewaysOperations),
            ('available_endpoint_services', AvailableEndpointServicesOperations),
            ('express_route_circuit_authorizations', ExpressRouteCircuitAuthorizationsOperations),
            ('express_route_circuit_peerings', ExpressRouteCircuitPeeringsOperations),
            ('express_route_circuits', ExpressRouteCircuitsOperations),
            ('express_route_service_providers', ExpressRouteServiceProvidersOperations),
            ('load_balancers', LoadBalancersOperations),
            ('load_balancer_backend_address_pools', LoadBalancerBackendAddressPoolsOperations),
            ('load_balancer_frontend_ip_configurations', LoadBalancerFrontendIPConfigurationsOperations),
            ('inbound_nat_rules', InboundNatRulesOperations),
            ('load_balancer_load_balancing_rules', LoadBalancerLoadBalancingRulesOperations),
            ('load_balancer_network_interfaces', LoadBalancerNetworkInterfacesOperations),
            ('load_balancer_probes', LoadBalancerProbesOperations),
            ('network_interfaces', NetworkInterfacesOperations),
            ('network_interface_ip_configurations', NetworkInterfaceIPConfigurationsOperations),
            ('network_interface_load_balancers', NetworkInterfaceLoadBalancersOperations),
            ('network_security_groups', NetworkSecurityGroupsOperations),
            ('security_rules', SecurityRulesOperations),
            ('default_security_rules', DefaultSecurityRulesOperations),
            ('network_watchers', NetworkWatchersOperations),
            ('packet_captures', PacketCapturesOperations),
            ('public_ip_addresses', PublicIPAddressesOperations),
            ('route_filters', RouteFiltersOperations),
            ('route_filter_rules', RouteFilterRulesOperations),
            ('route_tables', RouteTablesOperations),
            ('routes', RoutesOperations),
            ('bgp_service_communities', BgpServiceCommunitiesOperations),
            ('usages', UsagesOperations),
            ('virtual_networks', VirtualNetworksOperations),
            ('subnets', SubnetsOperations),
            ('virtual_network_peerings', VirtualNetworkPeeringsOperations),
            ('virtual_network_gateways', VirtualNetworkGatewaysOperations),
            ('virtual_network_gateway_connections', VirtualNetworkGatewayConnectionsOperations),
            ('local_network_gateways', LocalNetworkGatewaysOperations),
        ]
        for attribute, operations_class in operation_groups:
            setattr(self, attribute, operations_class(
                self._client, self.config, self._serialize, self._deserialize))
def check_dns_name_availability(
self, location, domain_name_label=None, custom_headers=None, raw=False, **operation_config):
"""Checks whether a domain name in the cloudapp.net zone is available for
use.
:param location: The location of the domain name.
:type location: str
:param domain_name_label: The domain name to be verified. It must
conform to the following regular expression:
^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
:type domain_name_label: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DnsNameAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2017_06_01.models.DnsNameAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2017-06-01"
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if domain_name_label is not None:
query_parameters['domainNameLabel'] = self._serialize.query("domain_name_label", domain_name_label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DnsNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/network_management_client.py | Python | mit | 20,421 |
import os
import pdb
import functools
import glob
from core import models
from core.ali.oss import BUCKET, oss2key, is_object_exists, is_size_differ_and_newer, is_source_newer, is_md5_differ
from core.misc import *
from colorMessage import dyeWARNING, dyeFAIL
from sqlalchemy import create_engine, UniqueConstraint
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import IntegrityError
class DB(object):
    """Builds and populates the per-project SQLite job database.

    Translates the parsed pipeline configuration (apps, parameters,
    dependencies) into Project/Module/App/Task/Mapping rows and can emit
    helper shell scripts for uploading data/scripts to OSS.
    """
    def __init__(self, db_path, pipe_path, apps, parameters, dependencies, overwrite=False):
        """Open (or create) the SQLite database at *db_path* and bind a session.

        :param db_path: sqlite file path, or ':memory:' for a transient DB
        :param pipe_path: path of the pipeline definition
        :param apps: mapping of app name -> app object (with .scripts, .config, ...)
        :param parameters: parsed parameters content (see trim_parameters for
            the reserved non-module sections)
        :param dependencies: module -> app -> dependency description
        :param overwrite: when True, delete any existing database file first
        """
        super(DB, self).__init__()
        self.db_path = db_path
        self.proj_path = os.path.dirname(db_path)
        self.pipe_path = pipe_path
        self.apps = apps
        self.parameters = parameters
        # Only the module sections of the parameters survive here.
        self.modules = self.trim_parameters(parameters)
        self.dependencies = dependencies
        self.engine = create_engine('sqlite:///{db_path}'.format(db_path=self.db_path))
        self.proj = None
        if overwrite and os.path.exists(self.db_path):
            os.remove(self.db_path)
        # Create the schema only when the database file does not exist yet
        # (always true for ':memory:').
        if not os.path.exists(self.db_path):
            models.Base.metadata.create_all(self.engine)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()
def trim_parameters(self, parameters):
skips = ['Inputs', 'Property', 'Parameters', 'CommonData', 'Samples', 'Groups', 'Miscellaneous', 'Outputs', 'Conditions', 'CommonParameters']
return {k:v for k, v in parameters.iteritems() if k not in skips}
def format(self):
self.mkProj()
self.mkInstance()
map(self.mkModule, self.modules.keys())
if self.db_path != ':memory:':
self.mkDepends()
self.session.commit()
def add(self):
snap_db_list = os.path.expanduser("~/.snap/db.yaml")
db_list = {}
if os.path.exists(snap_db_list):
db_list = loadYaml(snap_db_list)
if db_list is None:
db_list = {}
contract_id = self.parameters['CommonParameters']['ContractID']
db_list[contract_id] = self.db_path
dumpYaml(snap_db_list, db_list)
def mkProj(self):
commom_parameters = self.parameters['CommonParameters']
self.proj = models.Project(
name = commom_parameters['ContractID'],
description = commom_parameters['project_description'],
type = commom_parameters.get('BACKEND', models.BCS),
pipe = self.pipe_path,
path = commom_parameters.get('WORKSPACE', './'),
max_job = commom_parameters.get('MAX_JOB', 50),
mns = commom_parameters.get('MNS') )
self.session.add(self.proj)
self.session.commit()
    def mkModule(self, module_name):
        """Create a Module row and persist every known app listed under it."""
        # NOTE(review): the Module row is never session.add()-ed here; it is
        # presumably persisted through the App relationship in mkApp — confirm,
        # otherwise a module whose apps are all unknown is silently dropped.
        module = models.Module(name = module_name)
        self.session.commit()
        for appname in self.modules[module_name].keys():
            # Apps mentioned in the parameters but absent from self.apps are
            # skipped silently.
            if appname in self.apps:
                self.mkApp(self.apps[appname], module)
def mkInstance(self):
instance_list = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'instance.txt')
instances = []
with open(instance_list, 'r') as instance_file:
for line in instance_file:
(Name, CPU, MEM, DiskType, DiskSize, Price) = line.strip().split('\t')
instances.append(models.Instance(
name=Name, cpu=CPU, mem=MEM, price=Price,
disk_type=DiskType, disk_size=DiskSize) )
self.session.add_all(instances)
self.session.commit()
    def chooseInstance(self, app):
        """Pick the Instance row for *app*: either the explicitly configured
        instance id, or the cheapest instance satisfying the cpu/mem needs.

        :raises LookupError: when the resource query matches nothing
        """
        instance_id = getAppConfig(app, ['requirements', 'instance', 'id'])
        # Python-2 idiom: map() returns a list here, so tuple unpacking works.
        (cpu, mem, disk_size, disk_type) = map(functools.partial(getResourceConfig, app=app), ['cpu', 'mem', 'disk', 'disk_type'])
        if instance_id is None:
            # Cheapest instance with enough CPU and memory.  Disk needs are
            # not part of the query (disk_size/disk_type are unused here).
            instance = self.session.query(models.Instance). \
                filter( models.Instance.cpu >= cpu ). \
                filter( models.Instance.mem >= unifyUnit(mem) ). \
                order_by( models.Instance.price ).first()
        else:
            # NOTE(review): .one() raises if no row matches, so the LookupError
            # below effectively only guards the .first() branch — confirm.
            instance = self.session.query(models.Instance).filter_by(name = instance_id).one()
        if instance is None:
            raise LookupError("No proper instance found!")
        return instance
    def mkApp(self, app, module):
        """Persist one pipeline app under *module*: an App row plus one Task
        row (with its Mappings) per generated script of that module.

        NOTE(review): the *app* name is rebound below from the pipeline app
        object to the freshly created models.App row, and mkTask runs after
        that rebinding — so ``app.docker_image`` inside mkTask reads from the
        DB row, not the original app.  Confirm this is intended.
        """
        def mkTask(script):
            # Create the Task row for one generated shell script and attach
            # its OSS mappings.
            def mkMapping(mapping):
                # Insert the mapping; on a uniqueness violation roll back and
                # reuse the already-stored row (looked up without is_required).
                m = models.Mapping(
                    name = mapping['name'],
                    source = mapping['source'],
                    destination = mapping['destination'],
                    is_write = mapping['is_write'],
                    is_immediate = mapping['is_immediate'],
                    is_required = mapping['is_required'])
                try:
                    self.session.add(m)
                    self.session.commit()
                except IntegrityError:
                    self.session.rollback()
                    m = self.session.query(models.Mapping).filter_by(
                        name = mapping['name'],
                        source = mapping['source'],
                        destination = mapping['destination'],
                        is_write = mapping['is_write'],
                        is_immediate = mapping['is_immediate']).one()
                return m
            # The Task shares the resource figures computed for the App.
            script['task'] = models.Task(
                shell = os.path.abspath(script['filename']),
                cpu = cpu,
                mem = unifyUnit(mem),
                docker_image = app.docker_image,
                disk_size = unifyUnit(disk_size),
                disk_type = disk_type,
                project = self.proj,
                module = module,
                app = app,
                mapping = map(mkMapping, script['mappings']),
                instance = instance)
            try:
                self.session.add(script['task'])
                self.session.commit()
            except IntegrityError:
                # Duplicate shell path: keep the existing Task, warn only.
                self.session.rollback()
                print dyeWARNING("'{sh}' not unique".format(sh=script['filename']))
        # NOTE(review): this first assignment is immediately overwritten by
        # the tuple unpacking on the next line — dead code?
        mem = getAppConfig(app, ['requirements', 'resources', 'mem'])
        (cpu, mem, disk_size, disk_type) = map(functools.partial(getResourceConfig, app=app), ['cpu', 'mem', 'disk', 'disk_type'])
        instance = self.chooseInstance(app)
        # Only scripts belonging to this module are materialised as tasks.
        scripts = [s for s in app.scripts if s['module'] == module.name]
        app = models.App(
            name = app.appname,
            alias = getAppConfig(app, ['name']),
            docker_image = getAppConfig(app, ['requirements', 'container', 'image']),
            instance_image = getAppConfig(app, ['requirements', 'instance', 'image']),
            yaml = app.config_file,
            cpu = cpu,
            mem = unifyUnit(mem),
            disk_size = unifyUnit(disk_size),
            disk_type = disk_type,
            module = module,
            instance = instance)
        self.session.add(app)
        self.session.commit()
        # Python 2: map() is eager, so this executes mkTask for every script.
        map(mkTask, scripts)
    def mkDepends(self):
        """Wire Task.depend_on relations from self.dependencies.

        For every app, each declared dependency is linked either per sample
        (when both sides' scripts are templated on the sample name) or as a
        full cross product of the two apps' tasks.
        """
        def mkCombTaskDepends(tasks, dep_tasks):
            # Cross product: every task depends on every dependency task.
            for task in tasks:
                for dep_task in dep_tasks:
                    task.depend_on.append(dep_task)
            self.session.commit()
        def mkSampleTaskDepends(app, module, dep_app, dep_module):
            # Link tasks pairwise per sample instead of as a cross product.
            def mkEachSampleTaskDepends(sample_name):
                tasks = getSampleTask(app, module, sample_name)
                dep_tasks = getSampleTask(dep_app, dep_module, sample_name)
                mkCombTaskDepends(tasks, dep_tasks)
            map(mkEachSampleTaskDepends, [sample['sample_name'] for sample in self.parameters['Samples']])
        def mkAppDepends(app, module_name, depends):
            for dep_appname in depends[app.name]['depends']:
                # Dependencies on apps absent from the parameters are skipped.
                if dep_appname not in apps_in_param:
                    print dyeWARNING("{appname}: skipping dependence app {dep_appname} since it's not in parameters.conf".format(appname=app.name, dep_appname=dep_appname))
                    continue
                if dep_appname in depends:
                    # Dependency lives in the same module as the dependant.
                    dep_module_name = module_name
                    dep_module = self.session.query(models.Module).filter_by(name = dep_module_name).one()
                    dep_app = self.session.query(models.App).filter_by(name = dep_appname).filter_by(module_id = dep_module.id).one()
                else:
                    # Dependency belongs to another module; resolve it first.
                    dep_module_name = getDepModule(dep_appname)
                    dep_module = self.session.query(models.Module).filter_by(name = dep_module_name).one()
                    dep_app = self.session.query(models.App).filter_by(name = dep_appname).filter_by(module_id = dep_module.id).one()
                if hasSampleName(module_name, app.name) and hasSampleName(dep_module_name, dep_app.name):
                    mkSampleTaskDepends(app, module_name, dep_app, dep_module_name)
                else:
                    tasks = getModuleAppTask(app, module_name)
                    dep_tasks = getModuleAppTask(dep_app, dep_module_name)
                    mkCombTaskDepends(tasks, dep_tasks)
        def getDepModule(dep_appname):
            # Find the unique module that declares dep_appname; zero or
            # multiple hits are hard errors.
            dep_modules = [k for k, v in self.dependencies.iteritems() if dep_appname in v]
            if len(dep_modules) == 0:
                msg = '{dep_appname} not in any module'.format(dep_appname=dep_appname)
                print dyeFAIL(msg)
                raise KeyError(msg)
            elif len(dep_modules) > 1:
                msg = '{dep_appname} has more than one module: {modules}'.format(dep_appname=dep_appname, modules=dep_modules)
                print dyeFAIL(msg)
                raise KeyError(msg)
            elif len(dep_modules) == 1:
                dep_module = dep_modules[0]
            return dep_module
        def hasSampleName(module, appname):
            # True when the app's script path is templated on the sample name.
            return self.dependencies[module][appname]['sh_file'].count('sample_name}}') > 0
        def getModuleAppTask(app, module):
            return [t for t in app.task if t.module.name == module]
        def getSampleTask(app, module, sample_name):
            return [s['task'] for s in self.apps[app.name].scripts if s['task'].module.name == module and s['extra']['sample_name'] == sample_name]
        def mkModuleDepend(name, depends):
            module = self.session.query(models.Module).filter_by(name = name).one()
            for app in module.app:
                mkAppDepends(app, module.name, depends)
        # Flat list of every app name mentioned in the parameters
        # (Python 2: reduce is a builtin; concat comes from core.misc).
        apps_in_param = reduce(concat, [apps.keys() for apps in self.modules.values()])
        for name in self.modules.keys():
            mkModuleDepend(name, self.dependencies[name])
    def mkOSSuploadSH(self):
        """Write uploadData2OSS.sh / uploadScript2OSS.sh: ossutil commands for
        every read-mapping (data) and every 'sh' mapping (scripts) that is
        missing or stale on OSS."""
        def addSource(source, destination):
            # Queue an upload when the object is missing, or looks stale by
            # size+mtime, or by mtime+md5.  file_size doubles as a seen-set,
            # but only records sizes for objects that do not exist yet — the
            # GB estimate printed below covers only those.
            if source in file_size:
                return
            if not is_object_exists(destination):
                file_size[source] = os.path.getsize(source)
                cmd.append("ossutil cp -f %s %s" % (source, destination))
            elif is_size_differ_and_newer(source, destination):
                cmd.append("ossutil cp -f %s %s" % (source, destination))
            elif is_source_newer(source, destination) and is_md5_differ(source, destination):
                cmd.append("ossutil cp -f %s %s" % (source, destination))
        def tryAddSourceWithPrefix(source, destination):
            # Fall back to prefix matching (e.g. index files next to the data).
            for each_source in glob.glob(source+'*'):
                each_destination = os.path.join(os.path.dirname(destination), os.path.basename(each_source))
                addSource(each_source, each_destination)
        def mkDataUpload():
            # All read-only, non-immediate data mappings except scripts.
            for m in self.session.query(models.Mapping). \
                filter_by(is_write = 0, is_immediate = 0). \
                filter(models.Mapping.name != 'sh').all():
                if os.path.exists(m.source):
                    addSource(m.source, m.destination)
                else:
                    if not m.exists():
                        msg = "{name}:{source} not exist.".format(name = m.name, source = m.source)
                        print dyeFAIL(msg)
                    tryAddSourceWithPrefix(m.source, m.destination)
            # set() deduplicates repeated uploads of the same source.
            content = "\n".join(['set -ex'] + list(set(cmd)))
            print "uploadData2OSS.sh: %d files(%d GB) to upload" % (len(file_size), sum(file_size.values())/2**30)
            script_file = os.path.join(self.proj_path, 'uploadData2OSS.sh')
            write(script_file, content)
        def mkScriptUpload():
            for m in self.session.query(models.Mapping).filter_by(name = 'sh').all():
                addSource(m.source, m.destination)
            content = "\n".join(['set -ex'] + list(set(cmd)))
            print "uploadScripts2OSS.sh: %d files to upload" % len(cmd)
            script_file = os.path.join(self.proj_path, 'uploadScript2OSS.sh')
            write(script_file, content)
        cmd = []
        file_size = {}
        mkDataUpload()
        # Reset the command list (but not file_size) before the script phase.
        cmd = []
        mkScriptUpload()
def mkOssSyncSH(self):
def mkDataSync():
content = "\n".join([
'{snap_path}/snap mapping list -p {project} -is_not_write -is_not_immediate -skip_existed',
'read -p "Mappings above will be sync. Press enter to continue"',
'{snap_path}/snap mapping sync -p {project} -is_not_write -is_not_immediate -estimate_size' ]).format(snap_path=snap_path, project=self.proj.name)
script_file = os.path.join(self.proj_path, 'uploadData2OSS.sh')
write(script_file, content)
def mkScriptSync():
content = "\n".join([
'{snap_path}/snap mapping list -p {project} -name sh -skip_existed',
'read -p "Mappings above will be sync. Press enter to continue"',
'{snap_path}/snap mapping sync -p {project} -name sh -estimate_size' ]).format(snap_path=snap_path, project=self.proj.name)
script_file = os.path.join(self.proj_path, 'uploadScript2OSS.sh')
write(script_file, content)
snap_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
mkDataSync()
mkScriptSync()
def unifyUnit(size):
    """Normalize a size specification to gigabytes as a float.

    Accepts a bare number (already in GB) or a string with an 'M'/'G'
    suffix, case-insensitive.

    :param size: int, float, or str like '512M' / '2G'
    :returns: size in gigabytes
    :raises ValueError: for an unrecognized unit suffix
    """
    if isinstance(size, (int, float)):
        return float(size)
    unit = size.upper()
    if unit.endswith("M"):
        # Slice off the suffix instead of strip('M'): strip also removes
        # matching leading characters and would mangle values like 'M512M'.
        return float(unit[:-1]) / 1024
    if unit.endswith("G"):
        return float(unit[:-1])
    raise ValueError("Unknown Unit: {size}".format(size=size))
def getConfig(appconfig, keys):
    """Walk a nested mapping along *keys*; return the value found, or None
    as soon as any level is missing.  An empty key list returns the mapping
    itself."""
    node = appconfig
    for key in keys:
        node = node.get(key)
        if node is None:
            return None
    return node
def getAppConfig(app, keys):
    """Look up *keys* under the 'app' section of the app's YAML config."""
    return getConfig(app.config['app'], keys)
def getResourceConfig(key, app):
    """Fetch one resource requirement (cpu/mem/disk/disk_type) from the
    app's 'requirements.resources' section."""
    return getAppConfig(app, ['requirements', 'resources', key])
| gahoo/SNAP | core/db.py | Python | mit | 14,857 |
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Multiples of 3 and 5
#Problem level: 6 kyu
def solution(number):
    """Return the sum of all multiples of 3 or 5 strictly below *number*."""
    # Generator expression: no intermediate list is materialised inside sum().
    return sum(x for x in range(3, number) if x % 3 == 0 or x % 5 == 0)
| Kunalpod/codewars | multiples_of_3_and_5.py | Python | mit | 182 |
# Minimal Django settings module for running the django-flanker test suite.

# Debugging stays off; templates follow the global flag.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-US'
SITE_ID = 1
USE_L10N = True
USE_TZ = True
# Hard-coded key is acceptable only because these settings are test-only.
SECRET_KEY = 'local'
# In-process memory cache: no external service needed for tests.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# In-memory SQLite: the test database never touches disk.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django_flanker',
    'tests',
)
| dmpayton/django-flanker | tests/settings.py | Python | mit | 609 |
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.computer_set import ComputerSet # noqa: E501
from openapi_server.models.free_style_build import FreeStyleBuild # noqa: E501
from openapi_server.models.free_style_project import FreeStyleProject # noqa: E501
from openapi_server.models.hudson import Hudson # noqa: E501
from openapi_server.models.list_view import ListView # noqa: E501
from openapi_server.models.queue import Queue # noqa: E501
from openapi_server.test import BaseTestCase
class TestRemoteAccessController(BaseTestCase):
"""RemoteAccessController integration test stubs"""
def test_get_computer(self):
"""Test case for get_computer
"""
query_string = [('depth', 56)]
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/computer/api/json',
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_jenkins(self):
"""Test case for get_jenkins
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/api/json',
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_job(self):
"""Test case for get_job
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/job/{name}/api/json'.format(name='name_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_job_config(self):
"""Test case for get_job_config
"""
headers = {
'Accept': 'text/xml',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/job/{name}/config.xml'.format(name='name_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_job_last_build(self):
"""Test case for get_job_last_build
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/job/{name}/lastBuild/api/json'.format(name='name_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_job_progressive_text(self):
"""Test case for get_job_progressive_text
"""
query_string = [('start', 'start_example')]
headers = {
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/job/{name}/{number}/logText/progressiveText'.format(name='name_example', number='number_example'),
method='GET',
headers=headers,
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_queue(self):
"""Test case for get_queue
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/queue/api/json',
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_queue_item(self):
"""Test case for get_queue_item
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/queue/item/{number}/api/json'.format(number='number_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_view(self):
"""Test case for get_view
"""
headers = {
'Accept': 'application/json',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/view/{name}/api/json'.format(name='name_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_get_view_config(self):
"""Test case for get_view_config
"""
headers = {
'Accept': 'text/xml',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/view/{name}/config.xml'.format(name='name_example'),
method='GET',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_head_jenkins(self):
"""Test case for head_jenkins
"""
headers = {
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/api/json',
method='HEAD',
headers=headers)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_post_create_item(self):
"""Test case for post_create_item
"""
body = 'body_example'
query_string = [('name', 'name_example'),
('from', '_from_example'),
('mode', 'mode_example')]
headers = {
'Accept': '*/*',
'Content-Type': 'application/json',
'jenkins_crumb': 'jenkins_crumb_example',
'content_type': 'content_type_example',
'Authorization': 'Basic Zm9vOmJhcg==',
}
response = self.client.open(
'/createItem',
method='POST',
headers=headers,
data=json.dumps(body),
content_type='application/json',
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_post_create_view(self):
    """POST a JSON payload to /createView with CSRF crumb; expect HTTP 200."""
    payload = 'body_example'
    params = [('name', 'name_example')]
    response = self.client.open(
        '/createView',
        method='POST',
        headers={
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'jenkins_crumb': 'jenkins_crumb_example',
            'content_type': 'content_type_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        },
        data=json.dumps(payload),
        content_type='application/json',
        query_string=params)
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_build(self):
    """POST a build trigger for a job with CSRF crumb; expect HTTP 200."""
    endpoint = '/job/{name}/build'.format(name='name_example')
    params = [('json', 'json_example'),
              ('token', 'token_example')]
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        },
        query_string=params)
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_config(self):
    """POST a job configuration document with CSRF crumb; expect HTTP 200."""
    payload = 'body_example'
    endpoint = '/job/{name}/config.xml'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        },
        data=json.dumps(payload),
        content_type='application/json')
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_delete(self):
    """POST a job deletion request with CSRF crumb; expect HTTP 200."""
    endpoint = '/job/{name}/doDelete'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        })
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_disable(self):
    """POST a job disable request with CSRF crumb; expect HTTP 200."""
    endpoint = '/job/{name}/disable'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        })
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_enable(self):
    """POST a job enable request with CSRF crumb; expect HTTP 200."""
    endpoint = '/job/{name}/enable'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        })
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_job_last_build_stop(self):
    """POST a stop request for a job's last build; expect HTTP 200."""
    endpoint = '/job/{name}/lastBuild/stop'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        })
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
def test_post_view_config(self):
    """POST a view configuration document with CSRF crumb; expect HTTP 200."""
    payload = 'body_example'
    endpoint = '/view/{name}/config.xml'.format(name='name_example')
    response = self.client.open(
        endpoint,
        method='POST',
        headers={
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'jenkins_crumb': 'jenkins_crumb_example',
            'Authorization': 'Basic Zm9vOmJhcg==',
        },
        data=json.dumps(payload),
        content_type='application/json')
    self.assert200(
        response, 'Response body is : ' + response.data.decode('utf-8'))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| cliffano/swaggy-jenkins | clients/python-flask/generated/openapi_server/test/test_remote_access_controller.py | Python | mit | 11,876 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from pprint import pprint
import re
from fileinput import input
import csv
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Filler words stripped during hospital-name normalization so that fuzzy
# matching compares only the distinctive parts of each name.
articles = ['the', 'a']
prepositions = ['at', 'of']
hospital_words = ['hospital', 'medical', 'center']
word_exclusions = articles + prepositions + hospital_words
class FuzzyHospitals:
    """Fuzzy-matching index over a collection of Hospital records."""

    class Result:
        """A matched Hospital together with its fuzzy-match score."""

        def __init__(self, hospital, score):
            self.hospital = hospital
            self.score = score

    def __init__(self, hospitals):
        # Keep only records with a non-empty normalized name.
        named = [h for h in hospitals if h.name]
        self._hospitals = named
        self._name_cache = [h.name for h in named]
        self._name_dict = {h.name: h for h in named}

    def match(self, name):
        """Return the best fuzzy match for ``name`` as a ``Result``."""
        candidates = process.extract(
            normalize_hospital_name(name), self._name_cache, limit=1)
        if len(candidates) == 1:
            best_name, best_score = candidates[0]
        else:
            best_name, best_score = None, 0
        matched = (self._name_dict[best_name] if best_name
                   else Hospital("No Match", "No Match"))
        return FuzzyHospitals.Result(matched, best_score)
class Hospital:
    """A hospital record: original name, normalized name, and payload data."""

    def __init__(self, name, data):
        self.original_name = name
        self.data = data
        # Normalized form is what the fuzzy matcher compares against.
        self.name = normalize_hospital_name(self.original_name)
def normalize_hospital_name(name):
    """Lower-case ``name``, strip non-letter characters, and drop filler words."""
    cleaned = name.casefold().replace("-", " ")
    cleaned = re.sub("[^abcdefghijklmnopqrstuvwxyz ]", "", cleaned)
    kept = [word for word in cleaned.split() if word not in word_exclusions]
    return " ".join(kept)
def fetch_hospitals(lines):
    """Extract the last double-quoted field from each CSV-like line.

    Returns the quoted values with surrounding quotes removed, dropping
    empty values. Lines containing no quoted field are skipped instead of
    raising IndexError (the original indexed ``[-1]`` unconditionally,
    which crashed on such lines).
    """
    names = []
    for line in lines:
        quoted = re.findall(r"\".*?\"", line)
        if quoted:
            names.append(quoted[-1].strip('"'))
    return list(filter(None, names))
def extract_hospital(line):
    """Build a Hospital from a whitespace-separated line.

    The first token is discarded, the last two tokens form the data
    columns, and everything in between is the hospital name.
    """
    tokens = line.split()
    name = " ".join(tokens[1:-2])
    data = " ".join(tokens[-2:])
    return Hospital(name, data)
def fetch_hospital_data(lines):
    """Parse every input line into a Hospital record."""
    return list(map(extract_hospital, lines))
def write_table_to_file(filename, table):
    """Write the rows of ``table`` to ``filename`` as CSV.

    ``newline=''`` is required by the csv module when writing; without it
    the writer's own line endings are translated and an extra blank line
    appears between rows on Windows.
    """
    with open(filename, 'w', newline='') as f:
        tablewriter = csv.writer(f)
        tablewriter.writerows(table)
def match_files(file_a, file_b, outfile):
    # Fuzzy-join the hospital names in file_a against the hospital records in
    # file_b, writing (input name, matched original name, matched data) rows
    # to outfile as CSV. The first line of each input file is skipped
    # (assumed to be a header row).
    hospitals = fetch_hospitals([line for line in input(file_a)][1:])
    hospital_data = FuzzyHospitals(fetch_hospital_data([line for line in input(file_b)][1:]))
    output_table = []
    for hospital in hospitals:
        match = hospital_data.match(hospital)
        output_table.append((hospital, match.hospital.original_name, match.hospital.data))
        # Progress feedback: echo each matched row to stdout as it is found.
        pprint(output_table[-1])
    write_table_to_file(outfile, output_table)
######## Main ########
if __name__ == '__main__':
    from sys import argv

    # Bug fix: the original read ``else`` without a colon, a SyntaxError
    # that prevented this script from being imported or run at all.
    if len(argv) >= 4:
        match_files(argv[1], argv[2], argv[3])
    else:
        print("Invalid number of arguments. Please pass FileA, FileB, and the name of the output file respectively.")
| pingortle/collate_hospitals_kludge | match_hospitals.py | Python | mit | 2,609 |
# -*- coding: utf8 -*-
"""
@todo: Update argument parser options
"""
# Imports. {{{1
import sys
# Try to load the required modules from Python's standard library.
try:
import os
import argparse
from time import time
import hashlib
except ImportError as e:
msg = "Error: Failed to load one of the required Python modules! (%s)\n"
sys.stderr.write(msg % str(e))
sys.exit(1)
from dedupsqlfs.log import logging
from dedupsqlfs.lib import constants
from dedupsqlfs.db import check_engines
import dedupsqlfs
def mkfs(options, compression_methods=None, hash_functions=None):
    """Initialize (format) a dedupsqlfs filesystem with the given options.

    Creates the FUSE wrapper and its operations object, registers the
    available compression methods, and runs an init/destroy cycle of the
    operations layer to create the on-disk structures. Returns 0 on
    success and -1 on any exception (the traceback is printed).

    NOTE(review): ``hash_functions`` is accepted but never used in this
    body -- confirm whether it is consumed elsewhere or vestigial.
    """
    from dedupsqlfs.fuse.dedupfs import DedupFS
    from dedupsqlfs.fuse.operations import DedupOperations

    ops = None
    ret = 0
    try:
        ops = DedupOperations()

        _fuse = DedupFS(
            ops, None,
            options,
            fsname="dedupsqlfs", allow_root=True)

        # Skip formatting entirely if another process holds the lock.
        if not _fuse.checkIfLocked():

            _fuse.saveCompressionMethods(compression_methods)

            for modname in compression_methods:
                _fuse.appendCompression(modname)

            # Garbage collection is pointless while formatting; disable it.
            _fuse.setOption("gc_umount_enabled", False)
            _fuse.setOption("gc_vacuum_enabled", False)
            _fuse.setOption("gc_enabled", False)

            # init() creates the structures; destroy() flushes and tears down.
            _fuse.operations.init()
            _fuse.operations.destroy()
    except Exception:
        import traceback
        print(traceback.format_exc())
        ret = -1
    # Close the storage manager even after a failure, if it was created.
    if ops:
        ops.getManager().close()
    return ret
def main(): # {{{1
    """
    This function enables using mkfs.dedupsqlfs.py as a shell script that creates FUSE
    mount points. Execute "mkfs.dedupsqlfs -h" for a list of valid command line options.
    """

    logger = logging.getLogger("mkfs.dedupsqlfs/main")
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler(sys.stderr))

    parser = argparse.ArgumentParser(
        prog="%s/%s mkfs/%s python/%s" % (dedupsqlfs.__name__, dedupsqlfs.__version__, dedupsqlfs.__fsversion__, sys.version.split()[0]),
        conflict_handler="resolve")

    # Register some custom command line options with the option parser.
    option_stored_in_db = " (this option is only useful when creating a new database, because your choice is stored in the database and can't be changed after that)"

    parser.add_argument('-h', '--help', action='help', help="show this help message followed by the command line options defined by the Python FUSE binding and exit")
    parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help="increase verbosity: 0 - error, 1 - warning, 2 - info, 3 - debug, 4 - verbose")
    parser.add_argument('--log-file', dest='log_file', help="specify log file location")
    parser.add_argument('--log-file-only', dest='log_file_only', action='store_true',
                        help="Don't send log messages to stderr.")

    parser.add_argument('--data', dest='data', metavar='DIRECTORY', default="~/data", help="Specify the base location for the files in which metadata and blocks data is stored. Defaults to ~/data")
    parser.add_argument('--name', dest='name', metavar='DATABASE', default="dedupsqlfs", help="Specify the name for the database directory in which metadata and blocks data is stored. Defaults to dedupsqlfs")
    parser.add_argument('--temp', dest='temp', metavar='DIRECTORY', help="Specify the location for the files in which temporary data is stored. By default honour TMPDIR environment variable value.")
    parser.add_argument('-b', '--block-size', dest='block_size', metavar='BYTES', default=1024*128, type=int, help="Specify the maximum block size in bytes" + option_stored_in_db + ". Defaults to 128kB.")

    parser.add_argument('--memory-limit', dest='memory_limit', action='store_true', help="Use some lower values for less memory consumption.")

    parser.add_argument('--cpu-limit', dest='cpu_limit', metavar='NUMBER', default=0, type=int, help="Specify the maximum CPU count to use in multiprocess compression. Defaults to 0 (auto).")

    # Storage engine choice depends on which DB modules are installed.
    engines, msg = check_engines()
    if not engines:
        logger.error("No storage engines available! Please install sqlite or pymysql python module!")
        return 1

    parser.add_argument('--storage-engine', dest='storage_engine', metavar='ENGINE', choices=engines, default=engines[0],
                        help=msg)

    if "mysql" in engines:
        from dedupsqlfs.db.mysql import get_table_engines

        table_engines = get_table_engines()

        msg = "One of MySQL table engines: "+", ".join(table_engines)+". Default: %r. Aria and TokuDB engine can be used only with MariaDB or Percona server." % table_engines[0]
        parser.add_argument('--table-engine', dest='table_engine', metavar='ENGINE',
                            choices=table_engines, default=table_engines[0],
                            help=msg)

    parser.add_argument('--no-cache', dest='use_cache', action='store_false', help="Don't use cache in memory and delayed write to storage.")
    parser.add_argument('--no-transactions', dest='use_transactions', action='store_false', help="Don't use transactions when making multiple related changes, this might make the file system faster or slower (?).")
    parser.add_argument('--no-sync', dest='synchronous', action='store_false', help="Disable SQLite's normal synchronous behavior which guarantees that data is written to disk immediately, because it slows down the file system too much (this means you might lose data when the mount point isn't cleanly unmounted).")

    # Dynamically check for supported hashing algorithms.
    msg = "Specify the hashing algorithm that will be used to recognize duplicate data blocks: one of %s. Choose wisely - it can't be changed on the fly."
    hash_functions = list({}.fromkeys([h.lower() for h in hashlib.algorithms_available]).keys())
    hash_functions.sort()
    # Only offer the intersection of what hashlib provides and what the
    # filesystem is prepared to use.
    work_hash_funcs = set(hash_functions) & constants.WANTED_HASH_FUCTIONS
    msg %= ', '.join('%r' % fun for fun in work_hash_funcs)
    defHash = 'md5' # Hope it will be there always. Stupid.
    msg += ". Defaults to %r." % defHash
    parser.add_argument('--hash', dest='hash_function', metavar='FUNCTION', choices=work_hash_funcs, default=defHash, help=msg)

    # Dynamically check for supported compression methods.
    compression_methods = [constants.COMPRESSION_TYPE_NONE]
    compression_methods_cmd = [constants.COMPRESSION_TYPE_NONE]
    for modname in constants.COMPRESSION_SUPPORTED:
        try:
            module = __import__(modname)
            # A usable codec module must expose both compress and decompress.
            if hasattr(module, 'compress') and hasattr(module, 'decompress'):
                compression_methods.append(modname)
                if modname not in constants.COMPRESSION_READONLY:
                    compression_methods_cmd.append(modname)
        except ImportError:
            pass
    if len(compression_methods) > 1:
        compression_methods_cmd.append(constants.COMPRESSION_TYPE_BEST)
        compression_methods_cmd.append(constants.COMPRESSION_TYPE_CUSTOM)

    msg = "Enable compression of data blocks using one of the supported compression methods: one of %s"
    msg %= ', '.join('%r' % mth for mth in compression_methods_cmd)
    msg += ". Defaults to %r." % constants.COMPRESSION_TYPE_NONE
    msg += " You can use <method>:<level> syntax, <level> can be integer or value from --compression-level."
    if len(compression_methods_cmd) > 1:
        msg += " %r will try all compression methods and choose one with smaller result data." % constants.COMPRESSION_TYPE_BEST
        msg += " %r will try selected compression methods (--custom-compress) and choose one with smaller result data." % constants.COMPRESSION_TYPE_CUSTOM
    msg += "\nDefaults to %r." % constants.COMPRESSION_TYPE_NONE

    parser.add_argument('--compress', dest='compression', metavar='METHOD', action="append",
                        default=[constants.COMPRESSION_TYPE_NONE], help=msg)

    # NOTE(review): the following msg is built but never passed to any
    # add_argument call in this function -- it looks like a leftover for a
    # dropped --custom-compress option; confirm before removing.
    msg = "Enable compression of data blocks using one or more of the supported compression methods: %s"
    msg %= ', '.join('%r' % mth for mth in compression_methods_cmd[:-2])
    msg += ". To use two or more methods select this option in command line for each compression method."
    msg += " You can use <method>=<level> syntax, <level> can be integer or value from --compression-level."

    parser.add_argument('--force-compress', dest='compression_forced', action="store_true", help="Force compression even if resulting data is bigger than original.")
    parser.add_argument('--minimal-compress-size', dest='compression_minimal_size', metavar='BYTES', type=int, default=1024, help="Minimal block data size for compression. Defaults to 1024 bytes. Value -1 means auto - per method absolute minimum. Not compress if data size is less then BYTES long. If not forced to.")
    parser.add_argument('--minimal-compress-ratio', dest='compression_minimal_ratio', metavar='RATIO', type=float, default=0.05, help="Minimal data compression ratio. Defaults to 0.05 (5%%). Do not compress if ratio is less than RATIO. If not forced to.")

    levels = (constants.COMPRESSION_LEVEL_DEFAULT, constants.COMPRESSION_LEVEL_FAST, constants.COMPRESSION_LEVEL_NORM, constants.COMPRESSION_LEVEL_BEST)

    parser.add_argument('--compression-level', dest='compression_level', metavar="LEVEL", default=constants.COMPRESSION_LEVEL_DEFAULT,
                        help="Compression level ratio: one of %s; or INT. Defaults to %r. Not all methods support this option." % (
                            ', '.join('%r' % lvl for lvl in levels), constants.COMPRESSION_LEVEL_DEFAULT
                        ))

    # Dynamically check for profiling support.
    try:
        # Using __import__() here because of pyflakes.
        for p in 'cProfile', 'pstats': __import__(p)
        parser.add_argument('--profile', action='store_true', default=False, help="Use the Python modules cProfile and pstats to create a profile of time spent in various function calls and print out a table of the slowest functions at exit (of course this slows everything down but it can nevertheless give a good indication of the hot spots).")
    except ImportError:
        # NOTE(review): if this branch is taken, args.profile below will not
        # exist and accessing it raises AttributeError -- confirm intended.
        logger.warning("No profiling support available, --profile option disabled.")
        logger.warning("If you're on Ubuntu try 'sudo apt-get install python-profiler'.")

    args = parser.parse_args()

    if args.profile:
        sys.stderr.write("Enabling profiling..\n")
        import cProfile, pstats

        profile = '.dedupsqlfs.cprofile-%i' % time()
        profiler = cProfile.Profile()
        result = profiler.runcall(mkfs, args, compression_methods, hash_functions)
        profiler.dump_stats(profile)

        sys.stderr.write("\n Profiling statistics:\n\n")
        s = pstats.Stats(profile)
        s.sort_stats('calls').print_stats(0.1)
        s.sort_stats('cumtime').print_stats(0.1)
        s.sort_stats('tottime').print_stats(0.1)
        os.unlink(profile)
    else:
        result = mkfs(args, compression_methods, hash_functions)

    return result
# vim: ts=4 sw=4 et
| sergey-dryabzhinsky/dedupsqlfs | dedupsqlfs/app/mkfs.py | Python | mit | 11,069 |
# Benchmark script: time k-mer insertion into a de Bruijn graph for every
# supported storage backend and hasher combination, over the given inputs.
import argparse

from goetia import libgoetia
from goetia.dbg import dBG
from goetia.hashing import StrandAware, FwdLemireShifter, CanLemireShifter
from goetia.parsing import iter_fastx_inputs, get_fastx_args
from goetia.storage import *
from goetia.timer import measure_time

# Standard FASTX parsing options plus one or more input files.
parser = argparse.ArgumentParser()
group = get_fastx_args(parser)
group.add_argument('-i', dest='inputs', nargs='+', required=True)
args = parser.parse_args()

for storage_t in [SparseppSetStorage, PHMapStorage, BitStorage, BTreeStorage]:
    for hasher_t in [FwdLemireShifter, CanLemireShifter]:
        # K = 31 for all hashers.
        hasher = hasher_t(31)
        if storage_t is BitStorage:
            # BitStorage needs explicit sizing; assumes 1e9 slots / 4 tables
            # is representative -- TODO confirm against goetia.storage docs.
            storage = storage_t.build(int(1e9), 4)
        else:
            storage = storage_t.build()
        graph = dBG[storage_t, hasher_t].build(storage, hasher)
        # Processor consumes sequences in chunks of 100000 reads.
        consumer = dBG[storage_t, hasher_t].Processor.build(graph, 100000)
        for sample, name in iter_fastx_inputs(args.inputs, args.pairing_mode):
            print(f'dBG type: {type(graph)}')
            with measure_time():
                consumer.process(*sample)
| camillescott/boink | benchmarks/benchmark-dbg-insert.py | Python | mit | 1,086 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Image Grounded Conversations (IGC) Task.
See https://www.aclweb.org/anthology/I17-1047/ for more details. One must download
the data from https://www.microsoft.com/en-us/download/details.aspx?id=55324
prior to using this teacher.
The images are then downloaded from the urls specified in the data. Unfortunately,
not all links are live, and thus some examples do not have valid images.
As there is no training set, we manually split 90% of the validation set
into train.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
import csv
import os
from abc import ABC, abstractmethod
from PIL import Image
from typing import List, Dict, Any
from parlai.core.build_data import download_multiprocess
from parlai.core.params import Opt
from parlai.core.teachers import AbstractImageTeacher
from parlai.utils.io import PathManager
import parlai.utils.typing as PT
class IGCTeacher(AbstractImageTeacher):
    """
    Teacher for Image Grounded Conversations (IGC) Task.

    See https://arxiv.org/abs/1701.08251 for more details
    """

    def __init__(self, opt: Opt, shared: PT.TShared = None):
        # '0000' names a locally generated blank image that stands in for
        # examples whose real image could not be downloaded.
        self.blank_image_id = '0000'
        super().__init__(opt, shared)
        if shared is not None:
            self.valid_image_ids = shared['valid_image_ids']
        if self.image_features_dict is not None:
            self.image_features_dict[self.blank_image_id] = self.blank_image_features
        self.multi_ref = opt.get('igc_multi_ref', False)

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Include arg for multi-reference labels.
        """
        super().add_cmdline_args(parser, partial_opt=partial_opt)
        agent = parser.add_argument_group('IGC Arguments')
        agent.add_argument(
            '--igc-multi-ref',
            type='bool',
            default=False,
            help='specify to evaluate on multi-reference labels',
        )
        return parser

    def image_id_to_image_path(self, image_id: str) -> str:
        """
        Return image path given image id.

        Unknown image ids (images whose download failed) are mapped to the
        blank image so feature building never dereferences a missing file.

        :param image_id:
            image_id key, for IGC this is a str

        :return:
            the image path associated with the given image key
        """
        if image_id not in self.valid_image_ids:
            image_id = self.blank_image_id
        return os.path.join(self.get_image_path(self.opt), image_id)

    def get_data_path(self, opt: Opt) -> str:
        """
        Determines path to the data file.

        :param opt:
            Opt with all options

        :return:
            the path to the dataset
        """
        data_path = os.path.join(opt['datapath'], 'igc')
        return data_path

    def get_image_features_path(self, task, image_model_name, dt):
        """
        Override so that subclasses can see same image features.
        """
        # In default implementation, self.data_path already has task name added
        image_features_path = os.path.join(self.data_path, 'image_features')
        if not os.path.isdir(image_features_path):
            PathManager.mkdirs(image_features_path)
        return os.path.join(
            image_features_path, f'{image_model_name}_{dt}_features_dict'
        )

    def num_episodes(self) -> int:
        """
        Number of episodes.

        Iterate through each episode twice, playing each side of the conversation once.
        """
        return 2 * len(self.data)

    def num_examples(self) -> int:
        """
        Number of examples.

        There are three turns of dialogue in the IGC task -
        Context, Question, Response.

        Thus, return 3 * number of data examples.
        """
        return 3 * len(self.data)

    def get(self, episode_idx: int, entry_idx: int = 0) -> dict:
        """
        Override to handle corrupt images and multi-reference labels.
        """
        # Each episode is played twice (see num_episodes); doubling the entry
        # index (plus one for the mirrored pass) selects which conversation
        # turn becomes the label.
        entry_idx *= 2
        if episode_idx >= len(self.data):
            data = self.data[episode_idx % len(self.data)]
            entry_idx += 1
        else:
            data = self.data[episode_idx]
        image_id = data[self.image_id_key]
        # Substitute the blank image for examples whose download failed.
        if data[self.image_id_key] not in self.valid_image_ids:
            data[self.image_id_key] = self.blank_image_id
        image_features = self.get_image_features(data)
        conversation = [data['context'], data['question'], data['response']]
        labels = [conversation[entry_idx]]
        if self.multi_ref and entry_idx != 0:
            key = 'questions' if entry_idx == 1 else 'responses'
            # Multi-reference labels are '***'-delimited in the CSV.
            labels = data[f'multiref_{key}'].split('***')
        text = '' if entry_idx == 0 else conversation[entry_idx - 1]
        episode_done = entry_idx >= len(conversation) - 2
        action = {
            'text': text,
            'image_id': image_id,
            'episode_done': episode_done,
            'image': image_features,
            'labels': labels,
        }
        return action

    def load_data(self, data_path: str, opt: Opt) -> List[Dict[str, Any]]:
        """
        Override to load CSV files.
        """
        dt = opt['datatype'].split(':')[0]
        # IGC ships only val/test CSVs; train is carved out of val below.
        dt_str = 'test' if dt == 'test' else 'val'
        dp = os.path.join(self.get_data_path(opt), f'IGC_crowd_{dt_str}.csv')
        if not os.path.exists(dp):
            raise RuntimeError(
                'Please download the IGC Dataset from '
                'https://www.microsoft.com/en-us/download/details.aspx?id=55324. '
                'Then, make sure to put the two .csv files in {}'.format(
                    self.get_data_path(opt)
                )
            )
        # Download images on first use (<=1 entry means only the blank image).
        if (
            not os.path.exists(self.get_image_path(opt))
            or len(os.listdir(self.get_image_path(opt))) <= 1
        ):
            self._download_images(opt)
        self.data = []
        with PathManager.open(dp, newline='\n') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            fields = []
            for i, row in enumerate(reader):
                if i == 0:
                    fields = row
                else:
                    ep = dict(zip(fields, row))
                    ep['image_id'] = f'{ep["id"]}'
                    self.data.append(ep)
        if dt == 'train':
            # Take first 90% of valid set as train
            self.data = self.data[: int(len(self.data) * 0.9)]
        elif dt == 'valid':
            self.data = self.data[int(len(self.data) * 0.9) :]
        # Track which examples actually have an image file on disk.
        self.valid_image_ids = []
        for d in self.data:
            img_path = os.path.join(self.get_image_path(opt), d['image_id'])
            if PathManager.exists(img_path):
                self.valid_image_ids.append(d['image_id'])
        self.valid_image_ids = set(self.valid_image_ids)
        return self.data

    def _download_images(self, opt: Opt):
        """
        Download available IGC images.
        """
        urls = []
        ids = []
        for dt in ['test', 'val']:
            df = os.path.join(self.get_data_path(opt), f'IGC_crowd_{dt}.csv')
            with PathManager.open(df, newline='\n') as csv_file:
                reader = csv.reader(csv_file, delimiter=',')
                fields = []
                for i, row in enumerate(reader):
                    if i == 0:
                        fields = row
                    else:
                        data = dict(zip(fields, row))
                        urls.append(data['url'])
                        ids.append(data['id'])
        PathManager.mkdirs(self.get_image_path(opt))
        # Make one blank image
        image = Image.new('RGB', (100, 100), color=0)
        image.save(os.path.join(self.get_image_path(opt), self.blank_image_id), 'JPEG')
        # Download the rest
        download_multiprocess(urls, self.get_image_path(opt), dest_filenames=ids)
        # Remove bad images
        for fp in os.listdir(self.get_image_path(opt)):
            img_path = os.path.join(self.get_image_path(opt), fp)
            if PathManager.exists(img_path):
                try:
                    # Verify the file decodes as an image; delete it otherwise.
                    Image.open(img_path).convert('RGB')
                except OSError:
                    PathManager.rm(img_path)

    def share(self) -> PT.TShared:
        shared = super().share()
        # Share the valid-image set so forked workers skip re-scanning disk.
        shared['valid_image_ids'] = self.valid_image_ids
        return shared
class IGCOneSideTeacher(ABC, IGCTeacher):
    """
    Override to only return one side of the conversation.

    Abstract base: subclasses pick which turn is the label via
    get_label_key() and what the model sees via get_text().
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt=partial_opt)
        agent = parser.add_argument_group('IGCResponseOnly Arguments')
        agent.add_argument(
            '--igc-multi-ref',
            type='bool',
            default=False,
            help='specify true to evaluate on multi-reference labels',
        )
        return parser

    def num_episodes(self) -> int:
        # One single-turn episode per data row (unlike the two-pass parent).
        return len(self.data)

    def num_examples(self) -> int:
        return len(self.data)

    @abstractmethod
    def get_label_key(self) -> str:
        """
        Return key into data dictionary for the label.
        """
        pass

    @abstractmethod
    def get_text(self, data) -> str:
        """
        Return text for an example.
        """
        pass

    def get(self, episode_idx: int, entry_idx: int = 0) -> Dict[str, Any]:
        """
        Override to handle one-sided conversation.
        """
        data = self.data[episode_idx]
        image_id = data[self.image_id_key]
        # Substitute the blank image for examples whose download failed.
        if data[self.image_id_key] not in self.valid_image_ids:
            data[self.image_id_key] = self.blank_image_id
        image_features = self.get_image_features(data)
        labels = [data[self.get_label_key()]]
        if self.multi_ref:
            # Multi-reference labels are '***'-delimited in the CSV.
            labels = data[f'multiref_{self.get_label_key()}s'].split('***')
        text = self.get_text(data)
        action = {
            'text': text,
            'image_id': image_id,
            'episode_done': True,
            'image': image_features,
            'labels': labels,
        }
        return action
class ResponseOnlyTeacher(IGCOneSideTeacher):
    """
    Responses Only.

    The model observes the context and the question and must produce the
    response turn.
    """

    def get_label_key(self) -> str:
        return 'response'

    def get_text(self, data) -> str:
        observed_turns = [data['context'], data['question']]
        return '\n'.join(observed_turns)
class QuestionOnlyTeacher(IGCOneSideTeacher):
    """
    Questions Only.

    The model observes the context and must produce the question turn.
    """

    def get_label_key(self) -> str:
        return 'question'

    def get_text(self, data) -> str:
        context = data['context']
        return context
class DefaultTeacher(IGCTeacher):
    # Default to the full three-turn IGC teacher.
    pass
| facebookresearch/ParlAI | parlai/tasks/igc/agents.py | Python | mit | 11,014 |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem303.py
#
# Multiples with small digits
# ===========================
# Published on Saturday, 25th September 2010, 10:00 pm
#
# For a positive integer n, define f(n) as the least positive multiple of n
# that, written in base 10, uses only digits 2. Thus f(2)=2, f(3)=12, f(7)=21,
# f(42)=210, f(89)=1121222. Also, sum(f(n)/n for n = 1..100) = 11363107.
# Find sum(f(n)/n for n = 1..10000).
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| olduvaihand/ProjectEuler | src/python/problem303.py | Python | mit | 475 |
# !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Nov 19, 2015
# @author: Bo Zhao
# @email: bo_zhao@hks.harvard.edu
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import sys

from wbcrawler.sentiment import tencent_sentiment

# Python 2 idiom forcing UTF-8 as the default string encoding for the
# Chinese-language Weibo text this project processes.
# NOTE(review): reload(sys)/setdefaultencoding is widely discouraged and is
# Python-2-only; kept as-is since the whole script targets Python 2.
reload(sys)
sys.setdefaultencoding('utf-8')

# Run Tencent sentiment scoring for the 'insurance' collection against the
# MongoDB instance at localhost:27017.
# NOTE(review): the meaning of the leading positional args (0, 1) is not
# visible here -- confirm against tencent_sentiment's signature.
tencent_sentiment(0, 1, 'insurance', 'localhost', 27017)
| jakobzhao/wbcrawler3 | sentiment_by_tencent.py | Python | mit | 378 |
import unittest
from gis.protobuf.polygon_pb2 import Polygon2D, Polygon3D, MultiPolygon2D, MultiPolygon3D
from gis.protobuf.point_pb2 import Point2D, Point3D
class Polygon2DTestCase(unittest.TestCase):
    """Checks GeoJSON serialization of a 2-D polygon message."""

    def test_toGeoJSON(self):
        vertices = [Point2D(x=1.0, y=2.0), Point2D(x=3.0, y=4.0)]
        expected = {
            'type': 'Polygon',
            'coordinates': [[[1.0, 2.0], [3.0, 4.0]]],
        }
        self.assertEqual(Polygon2D(point=vertices).toGeoJSON(), expected)
class Polygon3DTestCase(unittest.TestCase):
    """Checks GeoJSON serialization of a 3-D polygon message."""

    def test_toGeoJSON(self):
        vertices = [Point3D(x=1.0, y=2.0, z=3.0), Point3D(x=4.0, y=5.0, z=6.0)]
        expected = {
            'type': 'Polygon',
            'coordinates': [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]],
        }
        self.assertEqual(Polygon3D(point=vertices).toGeoJSON(), expected)
class MultiPolygon2DTestCase(unittest.TestCase):
    """Checks GeoJSON serialization of a 2-D multi-polygon message."""

    def test_toGeoJSON(self):
        first = Polygon2D(point=[Point2D(x=1.0, y=2.0), Point2D(x=3.0, y=4.0)])
        second = Polygon2D(point=[Point2D(x=5.0, y=6.0), Point2D(x=7.0, y=8.0)])
        expected = {
            'type': 'MultiPolygon',
            'coordinates': [[[[1.0, 2.0], [3.0, 4.0]]], [[[5.0, 6.0], [7.0, 8.0]]]],
        }
        self.assertEqual(
            MultiPolygon2D(polygon=[first, second]).toGeoJSON(), expected
        )
class MultiPolygon3DTestCase(unittest.TestCase):
    """Checks GeoJSON serialization of a 3-D multi-polygon message."""

    def test_toGeoJSON(self):
        first = Polygon3D(point=[Point3D(x=1.0, y=2.0, z=3.0),
                                 Point3D(x=4.0, y=5.0, z=6.0)])
        second = Polygon3D(point=[Point3D(x=7.0, y=8.0, z=9.0),
                                  Point3D(x=10.0, y=11.0, z=12.0)])
        expected = {
            'type': 'MultiPolygon',
            'coordinates': [[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]],
                            [[[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]]],
        }
        self.assertEqual(
            MultiPolygon3D(polygon=[first, second]).toGeoJSON(), expected
        )
| tomi77/protobuf-gis | python/tests/test_polygon.py | Python | mit | 2,150 |
import logging
import os
from twilio.rest import Client
class TwilioClient(object):
    """Thin wrapper around the Twilio REST client.

    Credentials are read from the TWILIO_SID / TWILIO_TOKEN environment
    variables; a missing variable raises KeyError at construction.
    """

    def __init__(self):
        self.logger = logging.getLogger("botosan.logger")
        self.account_sid = os.environ["TWILIO_SID"]
        self.account_token = os.environ["TWILIO_TOKEN"]
        self.client = Client(self.account_sid, self.account_token)

    def get_mcc_and_mnc(self, phone_number):
        """
        Gets the Mobile Country Code and Mobile Network code for a given Twilio Number
        :param phone_number: The phone number, containing the +CC Number, ex: +12345678901 for the US.
        :return: a tuple containing the mcc and mnc
        """
        number = self.client.lookups.phone_numbers(phone_number).fetch(type="carrier")
        self.logger.info(number.carrier['mobile_country_code'])
        self.logger.info(number.carrier['mobile_network_code'])
        return number.carrier['mobile_country_code'], number.carrier['mobile_network_code']

    def get_available_numbers(self):
        """Return the phone numbers of available GB local numbers.

        Numbers requiring a local address are excluded from the search.
        """
        numbers = self.client.available_phone_numbers("GB").local.list(exclude_local_address_required=True)
        # Bug fix: ``numbers`` is a plain list and ``list.count`` requires an
        # argument, so the original ``numbers.count()`` raised TypeError.
        print(len(numbers))
        return [number.phone_number for number in numbers]
| FredLoh/BotoSan | twilio-mnc-mcc-getter.py | Python | mit | 1,280 |
# datapath config
# data folder location
data_folder = '/home/data/jleeae/ML/e_learning/KnowledgeTracing/data/'
# Raw ASSISTments downloads, and processed RNN sequence data (CSV / pickle).
csv_original_folder = data_folder + 'csv_original/'
csv_rnn_data_folder = data_folder + 'csv_rnn_data/'
pkl_rnn_data_folder = data_folder + 'pkl_rnn_data/'

# csv_original
Assistments2009_csv_original = csv_original_folder + 'skill_builder_data.csv'
Assistments2009_csv_original_corrected = csv_original_folder + 'skill_builder_data_corrected.csv'
Assistments2012_csv_original_problem_contents = csv_original_folder + 'ASSISTmentsProblems.csv'
Assistments2012_csv_original = csv_original_folder + '2012-2013-data-with-predictions-4-final.csv'
Assistments2012_csv_original_without_actions = csv_original_folder + '2012-2013-data-with-predictions-4-final-without-actions.csv'
# NOTE(review): unlike the entries above, this one is a bare filename with no
# csv_original_folder prefix -- confirm whether that is intentional.
Assistments2015_csv_original = '2015_100_skill_builders_main_problems.csv'
class AssistmentsProperties(object):
def __init__(self, version):
self.version = version
if ('2009' == version):
self.set2009Attr()
elif ('2012' == version):
self.set2012Attr()
elif ('2015' == version):
self.set2015Attr()
else:
print('{} yr is not realized'.format(version))
exit(1)
def set_datapath(self, datapath):
    # Pin an explicit dataset path, overriding get_datapath()'s derivation.
    self.datapath = datapath
# process_config
# method = {'default', 'sliding_window'}
# has_scaffolding = {True, False}
# count_no_skill_id = {True, False}
# has_test_mode = {True, False}
# allow_multi_skills = {True, False}
# one_hot = {True, False}
# window_length: int
def get_datapath(self, ext='csv', is_original=True, process_config = None, is_problem_contents=False, is_training=True):
version = {
'2009': '2009',
'2012': '2012',
'2015': '2015'
}.get(self.version, None)
if (None == version):
print('{} version not yet realized'.format(self.version))
exit(1)
_ext = {
'csv': 'csv',
'pkl': 'pkl'
}.get(ext, None)
if (None == _ext):
print('{} extension not yet realized'.format(ext))
exit(1)
if ('datapath' not in self.__dict__):
if ('csv' == ext and is_original):
if ('2009' == self.version):
datapath = Assistments2009_csv_original_corrected
elif ('2012' == self.version):
if (is_problem_contents):
datapath = Assistments2012_csv_original_problem_contents
else:
datapath = Assistments2012_csv_original_without_actions
elif ('2015' == self.version):
datapath = Assistments2015_csv_original
else:
datapath = self.get_processed_datapath(ext, process_config, is_problem_contents, is_training)
return datapath
def get_processed_datapath(self, ext='csv', process_config=None, is_problem_contents=False, is_training=True):
if (None == process_config):
print('process_config not set properly')
exit(1)
version = {
'2009': '2009',
'2012': '2012',
'2015': '2015'
}.get(self.version, None)
_ext = {
'csv': 'csv',
'pkl': 'pkl'
}.get(ext, None)
if (None == _ext):
print('{} extension not yet realized'.format(ext))
exit(1)
if (None == version):
print('{} version not yet realized'.format(self.version))
exit(1)
# TODO: name policy for problem_contents?
split_rate = process_config.get('split_rate', 0.2)
split_rate = str(int(split_rate * 100))
method = process_config.get('method', None)
has_scaffolding = process_config.get('has_scaffolding', None)
count_no_skill_id = process_config.get('count_no_skill_id', None)
has_test_mode = process_config.get('has_test_mode', None)
allow_multi_skills = process_config.get('allow_multi_skills', None)
one_hot = process_config.get('one_hot', None)
if ('csv' == ext):
datapath = csv_rnn_data_folder
elif ('pkl' == ext):
datapath = pkl_rnn_data_folder
datapath += 'split_' + split_rate + '/'
datapath += 'A' + version + '/'
if (one_hot):
datapath += 'one_hot/'
else:
datapath += 'not_one_hot/'
datapath += method
datapath += '/'
if ('sliding_window' == method):
# 'default' or 'same_as_training' or 'overlapping_last_element' or 'partition'
test_format = process_config.get('test_format', None)
datapath += test_format
datapath += '/'
window_length = process_config.get('window_length', None)
datapath += 'window_'
datapath += str(window_length)
datapath += '_'
if (is_training):
datapath = datapath + 'train_'
else:
datapath = datapath + 'test_'
if (has_scaffolding):
datapath += '1'
else:
datapath += '0'
if (count_no_skill_id):
datapath += '1'
else:
datapath += '0'
if (has_test_mode):
datapath += '1'
else:
datapath += '0'
if (allow_multi_skills):
datapath += '1'
else:
datapath += '0'
datapath += '.'
datapath += ext
return datapath
def set2009Attr(self):
self.order_id = 'order_id'
self.assignment_id = 'assignment_id'
self.user_id = 'user_id'
self.assistment_id = 'assistment_id'
self.problem_id = 'problem_id'
self.original = 'original'
self.correct = 'correct'
self.attempt_count = 'attempt_count'
self.ms_first_response = 'ms_first_response'
self.tutor_mode = 'tutor_mode'
self.answer_type = 'answer_type'
self.sequence_id = 'sequence_id'
self.student_class_id = 'student_class_id'
self.position = 'position'
self.type = 'type'
self.base_sequence_id = 'base_sequence_id'
self.skill_id = 'skill_id'
self.skill_name = 'skill_name'
self.teacher_id = 'teacher_id'
self.school_id = 'school_id'
self.hint_count = 'hint_count'
self.hint_total = 'hint_total'
self.overlap_time = 'overlap_time'
self.template_id = 'template_id'
self.answer_id = 'answer_id'
self.answer_text = 'answer_text'
self.first_action = 'first_action'
self.bottom_hint = 'bottom_hint'
self.opportunity = 'opportunity'
self.opportunity_original = 'opportunity_original'
def set2012Attr(self):
self.problem_log_id = 'problem_log_id'
self.skill = 'skill'
self.problem_id = 'problem_id'
self.user_id = 'user_id'
self.assignment_id = 'assignment_id'
self.assistment_id = 'assistment_id'
self.start_time = 'start_time'
self.end_time = 'end_time'
self.problem_type = 'problem_type'
self.original = 'original'
self.correct = 'correct'
self.bottom_hint = 'bottom_hint'
self.hint_count = 'hint_count'
self.actions = 'actions'
self.attempt_count = 'attempt_count'
self.ms_first_response = 'ms_first_response'
self.tutor_mode = 'tutor_mode'
self.sequence_id = 'sequence_id'
self.student_class_id = 'student_class_id'
self.position = 'position'
self.type = 'type'
self.base_sequence_id = 'base_sequence_id'
self.skill_id = 'skill_id'
self.teacher_id = 'teacher_id'
self.school_id = 'school_id'
self.overlap_time = 'overlap_time'
self.template_id = 'template_id'
self.answer_id = 'answer_id'
self.answer_text = 'answer_text'
self.first_action = 'first_action'
self.problemlog_id = 'problemlog_id'
self.Average_confidence_FRUSTRATED = 'Average_confidence(FRUSTRATED)'
self.Average_confidence_CONFUSED = 'Average_confidence(CONFUSED)'
self.Average_confidence_CONCENTRATING = 'Average_confidence(CONCENTRATING)'
self.Average_confidence_BORED = 'Average_confidence(BORED)'
# problem contents attribute
self.problem_content = 'body'
def set2015Attr(self):
self.user_id = 'user_id'
self.log_id = 'log_id'
self.sequence_id = 'sequence_id'
self.correct = 'correct'
| JSLBen/KnowledgeTracing | codes/AssistmentsProperties.py | Python | mit | 8,942 |
import pygame
class Animation:
def __init__(self, sheet, seq):
#Attributes
self.sheet = sheet
self.length = seq[0]
self.delay = seq[1]
self.x = seq[2]
self.y = seq[3]
self.w = seq[4]
self.h = seq[5]
self.step = 0
self.tick = 0
self.curX = self.x
def draw(self, screen, dest):
screen.blit(self.sheet, dest, pygame.Rect(self.curX, self.y, self.w, self.h))
if self.tick == self.delay:
self.tick = 0
self.step += 1
if self.step < self.length:
self.curX += self.w
else:
self.step = 0
self.curX = self.x
else:
self.tick += 1
| Ohjel/wood-process | jump/animation.py | Python | mit | 750 |
import collections
import collections.abc
import itertools
import types

import cherrypy

import busbus
from busbus.entity import BaseEntityJSONEncoder
from busbus.provider import ProviderBase
from busbus.queryable import Queryable
def json_handler(*args, **kwargs):
    """Serialize a handler result through BaseEntityJSONEncoder.

    Installed as cherrypy's tools.json_out handler so busbus entities are
    encoded by their custom encoder; returns UTF-8 bytes.
    """
    inner = cherrypy.serving.request._json_inner_handler
    payload = inner(*args, **kwargs)
    return BaseEntityJSONEncoder().encode(payload).encode('utf-8')
# Install the custom JSON serializer globally for tools.json_out.
cherrypy.config['tools.json_out.handler'] = json_handler

# Maps API entity names (as used in URL paths and the ?_expand= parameter)
# to the busbus classes they correspond to.
EXPAND_TYPES = {
    'providers': ProviderBase,
    'agencies': busbus.Agency,
    'stops': busbus.Stop,
    'routes': busbus.Route,
    'arrivals': busbus.Arrival,
}
def unexpand_init(result, to_expand):
    """Lazily serialize each entity in *result*, honoring *to_expand*."""
    def serialize(obj):
        return {attr: unexpand(value, to_expand)
                for attr, value in dict(obj).items()}
    return (serialize(obj) for obj in result)
def unexpand(obj, to_expand):
    """Recursively collapse busbus entities into JSON-friendly structures.

    Entities whose EXPAND_TYPES name appears in *to_expand* are serialized
    in full (their values unexpanded recursively); all other entities are
    reduced to ``{'id': obj.id}`` stubs.  Dicts, lists, tuples and
    iterators are walked recursively; anything else is returned as-is.
    """
    for name, cls in EXPAND_TYPES.items():
        if isinstance(obj, cls):
            if name not in to_expand:
                return {'id': obj.id}
            else:
                return {attr: unexpand(value, to_expand)
                        for attr, value in dict(obj).items()}
    if isinstance(obj, dict):
        return {attr: unexpand(value, to_expand)
                for attr, value in obj.items()}
    # Fix: collections.Iterator was removed in Python 3.10; the ABC now
    # lives (and has always also lived, since 3.3) in collections.abc.
    if isinstance(obj, (list, tuple, collections.abc.Iterator)):
        return (unexpand(value, to_expand) for value in obj)
    return obj
class APIError(Exception):
    """Base exception for API failures.

    Attributes:
        msg: human-readable error description.
        error_code: HTTP status code to report (defaults to 500).
    """

    def __init__(self, msg, error_code=500):
        self.msg = msg
        self.error_code = error_code
class EndpointNotFoundError(APIError):
    """Raised (and reported as HTTP 404) when an endpoint does not exist."""

    def __init__(self, entity, action=None):
        path = entity + '/' + action if action else entity
        message = 'Endpoint /{0} not found'.format(path)
        super(EndpointNotFoundError, self).__init__(message, 404)
class Engine(busbus.Engine):
    """A busbus engine exposed as a cherrypy web API.

    URLs look like /<entity>[/<action>]?param=...; the JSON response always
    carries a 'request' envelope describing status and parameters.
    """

    def __init__(self, *args, **kwargs):
        # perhaps fix this to use a decorator somehow?
        # Maps (entity, action) pairs to (handler, response-key) tuples.
        self._entity_actions = {
            ('stops', 'find'): (self.stops_find, 'stops'),
            ('routes', 'directions'): (self.routes_directions, 'directions'),
        }
        super(Engine, self).__init__(*args, **kwargs)

    @cherrypy.popargs('entity', 'action')
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def default(self, entity=None, action=None, **kwargs):
        """Catch-all route: dispatch /<entity>/<action>?params to handlers.

        Recognized meta-parameters: _expand (comma-separated entity names to
        serialize fully), _limit (positive int), realtime (boolean-ish),
        provider.id (restricts the query to one provider).
        APIError subclasses become an error envelope plus an HTTP status.
        """
        if entity is None:
            return self.help()
        response = {
            'request': {
                'status': 'ok',
                'entity': entity,
                'params': kwargs,
            }
        }
        try:
            to_expand = (kwargs.pop('_expand').split(',')
                         if '_expand' in kwargs else [])
            if to_expand:
                response['request']['expand'] = to_expand
            limit = kwargs.pop('_limit', None)
            if limit:
                try:
                    limit = int(limit)
                    if limit <= 0:
                        raise ValueError()
                except ValueError:
                    raise APIError('_limit must be a positive integer', 422)
                response['request']['limit'] = limit
            if 'realtime' in kwargs:
                # Coerce the string flag to a real boolean before it reaches
                # the provider layer; reject anything unrecognized.
                if kwargs['realtime'] in ('y', 'Y', 'yes', 'Yes', 'YES',
                                          'true', 'True', 'TRUE',
                                          'on', 'On', 'ON'):
                    kwargs['realtime'] = True
                elif kwargs['realtime'] in ('n', 'N', 'no', 'No', 'NO',
                                            'false', 'False', 'FALSE',
                                            'off', 'Off', 'OFF'):
                    kwargs['realtime'] = False
                else:
                    raise APIError('realtime is not a boolean', 422)
            if action:
                response['request']['action'] = action
                if (entity, action) in self._entity_actions:
                    # The action handler also decides which response key the
                    # results are reported under.
                    func, entity = self._entity_actions[(entity, action)]
                    result = func(**kwargs)
                else:
                    raise EndpointNotFoundError(entity, action)
            else:
                if 'provider.id' in kwargs:
                    provider_id = kwargs.pop('provider.id')
                    if provider_id in self._providers:
                        provider = self._providers[provider_id]
                        entity_func = getattr(provider, entity, None)
                    else:
                        # Unknown provider: empty result set, not an error.
                        entity_func = Queryable(())
                else:
                    entity_func = getattr(self, entity, None)
                if entity_func is not None:
                    result = entity_func.where(**kwargs)
                else:
                    raise EndpointNotFoundError(entity)
            if limit:
                result = itertools.islice(result, limit)
            # Lazily serialized; consumed when the JSON handler encodes it.
            response[entity] = unexpand_init(result, to_expand)
        except APIError as exc:
            response['request']['status'] = 'error'
            response['error'] = exc.msg
            cherrypy.response.status = exc.error_code
        return response

    def help(self):
        """Describe the available entities and (entity, action) endpoints."""
        return {
            'request': {
                'status': 'help',
            },
            '_entities': EXPAND_TYPES.keys(),
            '_actions': self._entity_actions.keys(),
        }

    def stops_find(self, **kwargs):
        """Find stops within `distance` of (`latitude`, `longitude`).

        All three parameters are required; raises APIError(422) otherwise.
        """
        expected = ('latitude', 'longitude', 'distance')
        if all(x in kwargs for x in expected):
            for x in expected:
                kwargs[x] = float(kwargs[x])
            latlon = (kwargs['latitude'], kwargs['longitude'])
            return super(Engine, self).stops.where(
                lambda s: s.distance_to(latlon) <= kwargs['distance'])
        else:
            raise APIError('missing attributes: ' + ','.join(
                x for x in expected if x not in kwargs), 422)

    def routes_directions(self, **kwargs):
        """Return the directions of one route (`route.id` + `provider.id`)."""
        expected = ('route.id', 'provider.id')
        missing = [x for x in expected if x not in kwargs]
        if missing:
            raise APIError('missing attributes: ' + ','.join(missing), 422)
        provider = self._providers[kwargs['provider.id']]
        route = provider.get(busbus.Route, kwargs['route.id'])
        return route.directions
| spaceboats/busbus | busbus/web.py | Python | mit | 6,218 |
from aiohttp import web
from dvhb_hybrid.export.xlsx import XLSXResponse
async def handler1(request):
    """Stream a small two-row spreadsheet under the name ``1.xlsx``."""
    async with XLSXResponse(request, filename='1.xlsx') as response:
        for row in ({'x': 2, 'y': 3}, {'x': 'a', 'y': 'f'}):
            response.append(row)
    return response
async def handler2(request):
    """Stream a two-row spreadsheet with explicit column headers."""
    head = ['Column X', 'Column Y']
    fields = ['x', 'y']
    async with XLSXResponse(request, head=head, fields=fields) as response:
        for row in ({'x': 2, 'y': 3}, {'x': 'a', 'y': 'f'}):
            response.append(row)
    return response
async def test_xlsx(aiohttp_client, loop):
    """Both XLSX endpoints must answer 200 with a non-empty body."""
    app = web.Application(loop=loop)
    app.router.add_get('/xlsx1', handler1)
    app.router.add_get('/xlsx2', handler2)
    client = await aiohttp_client(app)
    for path in ('/xlsx1', '/xlsx2'):
        resp = await client.get(path)
        assert resp.status == 200
        body = await resp.read()
        assert body
| dvhbru/dvhb-hybrid | tests/test_export_xlsx.py | Python | mit | 917 |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class ProjectFuture(_ParserScraper):
    """Scraper for the Project Future comic and its many side stories."""

    imageSearch = '//td[@class="tamid"]/img'
    prevSearch = '//a[./img[@alt="Previous"]]'

    def __init__(self, name, comic, first, last=None):
        """Configure one (sub)comic.

        Args:
            name: module name; 'ProjectFuture' itself or a sub-story name.
            comic: PHP page base name on projectfuturecomic.com.
            first: strip id of the first page.
            last: strip id of the final page for finished stories (optional).
        """
        if name == 'ProjectFuture':
            super(ProjectFuture, self).__init__(name)
        else:
            super(ProjectFuture, self).__init__('ProjectFuture/' + name)
        self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
        self.stripUrl = self.url + '?strip=%s'
        self.firstStripUrl = self.stripUrl % first
        if last:
            # Bug fix: the entry URL must point at the final strip; the
            # original left a literal '%s' placeholder in the URL.
            self.url = self.stripUrl % last
            self.endOfLife = True

    @classmethod
    def getmodules(cls):
        """Enumerate all Project Future scraper modules."""
        return (
            cls('AWalkInTheWoods', 'simeon', '1', last='12'),
            cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
            cls('BookOfTenets', 'tenets', '01', last='45'),
            cls('CriticalMass', 'criticalmass', 'cover', last='26'),
            cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
            cls('Emily', 'emily', '01-00'),
            cls('FishingTrip', 'fishing', '01-00'),
            cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
            cls('NiallsStory', 'niall', '00'),
            cls('ProjectFuture', 'strip', '0'),
            cls('RedValentine', 'redvalentine', '1', last='6'),
            cls('ShortStories', 'shorts', '01-00'),
            cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
            cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
            cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
            cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
            cls('TheEpsilonProject', 'epsilon', '00-01'),
            cls('TheHarvest', 'harvest', '01-00'),
            cls('TheSierraChronicles', 'sierra', '0', last='29'),
            cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
            cls('TurningANewPage', 'azrael', '1', last='54'),
        )
| webcomics/dosage | dosagelib/plugins/projectfuture.py | Python | mit | 2,118 |
"""
Initialize the application.
"""
import logging
logger = logging.getLogger(__name__)
import appdirs
import click
import datetime
import distutils.dir_util
import os
import putiopy
import sqlite3
# Application identity used to derive per-user/site data directories.
APP_NAME = 'putio-automator'
APP_AUTHOR = 'datashaman'
# Platform-appropriate config/data directories for this application.
DIRS = appdirs.AppDirs(APP_NAME, APP_AUTHOR)

from .db import create_db, database_path
# Ensure the local database exists as soon as the package is imported.
create_db()
def date_handler(obj):
    """JSON ``default`` hook: ISO-format dates/datetimes, None otherwise."""
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    return None
def find_config(verbose=False):
    """Return the first existing config.py from the well-known locations.

    Searches the current directory, then the user data dir, then the site
    data dir.  Returns None when no config file is found.
    """
    candidates = [
        os.path.join(os.getcwd(), 'config.py'),
        os.path.join(DIRS.user_data_dir, 'config.py'),
        os.path.join(DIRS.site_data_dir, 'config.py'),
    ]

    for candidate in candidates:
        message = 'Searching %s' % candidate
        logger.debug(message)
        if verbose:
            click.echo(message)
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate

    return None
def echo(level, message):
    """Log *message* at the named *level* and echo it to the console."""
    getattr(logger, level)(message)
    click.echo(message)
| datashaman/putio-automator | putio_automator/__init__.py | Python | mit | 1,262 |
#!/usr/bin/python
"""
Preferences Frame
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import os
import pickle
import wx
from forsteri.interface import data as idata
"""
Constant Declarations
"""
"""
Frame Class
"""
class PreferencesFrame(wx.Frame):
    """
    Frame that lets the user view and edit application preferences.

    Preferences are persisted to a pickle file under the application data
    directory (see loadPref/savePref).
    """
    def __init__(self, *args, **kwargs):
        """
        Build the preferences window, load stored preferences and show it.
        """
        ## Panel
        # Initialize the parents constructor.
        super(PreferencesFrame, self).__init__(*args, **kwargs)

        # Create the master panel.
        masterPanel = wx.Panel(self)

        # Create the master sizer.
        masterSizer = wx.BoxSizer(wx.VERTICAL)

        ## Reporting
        # Create the reporting static box.
        reportSB = wx.StaticBox(masterPanel, label="Reporting")

        # Create the reporting sizer.
        reportSizer = wx.StaticBoxSizer(reportSB, wx.VERTICAL)

        # Create the first rows sizer.
        row1Sizer = wx.BoxSizer(wx.HORIZONTAL)

        # Create the label for the first row.
        row1Label = wx.StaticText(masterPanel, label="Forecast Type")

        # Create the list of choices for the first row.
        choice1 = ["Auto", "MLR", "EMA", "Naive"]

        # Create the object for the first row.
        self.row1Obj = wx.ComboBox(masterPanel, size=(150, -1),
            choices=choice1, style=wx.CB_READONLY)

        # Add the contents to the row 1 sizer.
        row1Sizer.Add(row1Label, flag=wx.ALIGN_CENTER|wx.RIGHT, border=5)
        row1Sizer.Add(self.row1Obj, flag=wx.ALIGN_CENTER)

        # Add all rows to the report sizer.
        reportSizer.Add(row1Sizer, flag=wx.ALL, border=5)

        #

        ## Finish Buttons
        # Create the finish sizer.
        finishSizer = wx.BoxSizer(wx.HORIZONTAL)

        # Create the buttons.
        okButton = wx.Button(masterPanel, id=wx.ID_OK)
        cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)

        # Set the OK button to be the dafault button.
        okButton.SetDefault()

        # Add the buttons to the finish sizer.
        finishSizer.AddMany([okButton, (5, 0), cancelButton, (5, 0)])

        # Bind button presses to functions.
        okButton.Bind(wx.EVT_BUTTON, self.onOK)
        cancelButton.Bind(wx.EVT_BUTTON, self.onCancel)

        ## Panel Operations
        # Add everything to the master sizer.
        masterSizer.Add(reportSizer, flag=wx.ALL, border=5)
        masterSizer.AddSpacer(9)
        masterSizer.Add(wx.StaticLine(masterPanel, size=(585, 2)),
            flag=wx.ALIGN_CENTER)
        masterSizer.AddSpacer(9)
        masterSizer.Add(finishSizer,
            flag=wx.BOTTOM|wx.ALIGN_RIGHT, border=5)

        # Load the prefernces.
        self.loadPref()

        # Set the sizer for the master panel.
        masterPanel.SetSizer(masterSizer)

        # Bind closing the frame to a function.
        self.Bind(wx.EVT_CLOSE, self.onClose)

        # Set window properties.
        self.SetSize((600, 400))
        self.SetTitle("Preferences")
        self.Centre()
        self.Show(True)

    """
    Helper Functions
    """
    def loadPref(self):
        """
        Populate the form widgets from the pickled preferences file.
        """
        # Load the preferences from the pickle file.
        pref = pickle.load(open(os.path.join(idata.DATA, "Forsteri",
            "pref.p"), "rb"))

        # Set all of the prefernce objects.
        self.row1Obj.SetValue(pref["report_type"])

        return True

    def savePref(self):
        """
        Write the current form values back to the pickled preferences file.
        """
        # Initialize the preferences dictionary.
        pref = {}

        # Get all of the preference objects data.
        pref["report_type"] = self.row1Obj.GetValue()

        # Save the preferences into the pickle file.
        pickle.dump(pref, open(os.path.join(idata.DATA, "Forsteri",
            "pref.p"), "wb"))

        return True

    """
    Event Handlers
    """
    def onOK(self, event):
        """
        Persist the preferences and close the window.
        """
        # Save the preferences.
        self.savePref()

        self.Close()

    def onCancel(self, event):
        """
        Discard any edits and close the window.
        """
        self.Close()

    def onClose(self, event):
        """
        Destroy the frame when the window is closed.
        """
        self.Destroy()
def main():
    """Launch the preferences frame as a standalone wx application."""
    app = wx.App()
    PreferencesFrame(None, style=wx.DEFAULT_FRAME_STYLE)
    app.MainLoop()


if __name__ == '__main__':
    main()
| achawkins/Forsteri | forsteri/gui/window/preferences.py | Python | mit | 5,406 |
# -*- coding: utf-8 -*-
"""
The project pages map for project
"""
import io
import os
from optimus.builder.pages import PageViewBase
from optimus.conf import settings
from project import __version__ as sveetoy_version
from project.sitemap import PageSitemap, tree_from_directory_structure
from py_css_styleguide.model import Manifest
#sitemap_tree = tree_from_directory_structure(settings.TEMPLATES_DIR)
##sitemap_tree.show()
"""
Page objects
"""
class BasicPage(PageViewBase):
    """
    Default page view rendering the styleguide index.
    """
    title = "Index"
    template_name = "index.html"
    destination = "index.html"
    foundation_version = 6

    def get_context(self):
        """Extend the base context with the parsed styleguide manifest."""
        context = super(BasicPage, self).get_context()

        manifest_path = os.path.join(settings.SOURCES_DIR, 'css',
                                     'styleguide_manifest.css')
        manifest = Manifest()
        with io.open(manifest_path, 'r') as fp:
            manifest.load(fp)

        context.update({
            'styleguide': manifest,
            'version': sveetoy_version,
            'foundation_version': self.foundation_version,
        })
        return context
class PageWithSitemap(BasicPage):
    """
    BasicPage variant that also exposes the site sitemap to templates.
    """
    sitemap = {}

    def get_context(self):
        """Add the sitemap under the 'site_sitemap' context key."""
        context = super(PageWithSitemap, self).get_context()
        context.update({
            'site_sitemap': self.sitemap,
        })
        return context
# Enabled pages to build
# (the sitemap-driven variant is kept here, commented out, for reference)
#PAGES = PageSitemap(sitemap_tree, PageWithSitemap).ressources
PAGES = [
    BasicPage(),
    #BasicPage(foundation_version=5, template_name="index_f5.html", destination="f5/index.html"),
]
| sveetch/Sveetoy | project/pages.py | Python | mit | 1,627 |
import unittest
import time
import random
import os
# Must be set BEFORE any caliendo import so caliendo activates its
# record/replay machinery.
os.environ['USE_CALIENDO'] = 'True'

from caliendo.db import flatfiles
from caliendo.facade import patch
from caliendo.patch import replay
from caliendo.util import recache
from caliendo import Ignore
import caliendo

from test.api import callback
from test.api.callback import method_calling_method
from test.api.callback import method_with_callback
from test.api.callback import callback_for_method
from test.api.callback import CALLBACK_FILE
from test.api.callback import CACHED_METHOD_FILE
def run_n_times(func, n):
    """Invoke ``func(i)`` for i in range(n), each call in a forked child.

    The parent waits for each child before forking the next; children exit
    immediately after the call via os._exit(0).
    """
    for i in range(n):
        child = os.fork()
        if child:
            os.waitpid(child, 0)
        else:
            func(i)
            os._exit(0)
class ReplayTestCase(unittest.TestCase):
    """Exercises caliendo's @replay decorator across forked processes."""

    def setUp(self):
        # Start each test from a clean caliendo cache and empty scratch files.
        caliendo.util.register_suite()
        recache()
        flatfiles.CACHE_['stacks'] = {}
        flatfiles.CACHE_['seeds'] = {}
        flatfiles.CACHE_['cache'] = {}
        with open(CALLBACK_FILE, 'w+') as f:
            pass
        with open(CACHED_METHOD_FILE, 'w+') as f:
            pass

    def test_replay(self):
        """The callback replays on every run even though the patched method
        is cached (the cached-method file is written only once)."""
        def do_it(i):
            @replay('test.api.callback.callback_for_method')
            @patch('test.api.callback.method_with_callback')
            def test(i):
                cb_file = method_with_callback(callback_for_method, 0.5)
                with open(cb_file, 'rb') as f:
                    contents = f.read()
                # One extra '.' per replayed run proves the callback ran again.
                assert contents == ('.' * (i+1)), "Got {0} was expecting {1}".format(contents, ('.' * (i+1)))
            test(i)
            os._exit(0)
        # Each iteration runs in a forked child so caliendo state is isolated.
        for i in range(2):
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                do_it(i)
        with open(CACHED_METHOD_FILE, 'rb') as f:
            assert f.read() == '.'

    def test_replay_with_ignore(self):
        """Same as test_replay, but the random second argument is excluded
        from the cache key via Ignore([1]) so the cache still hits."""
        def do_it(i):
            @replay('test.api.callback.callback_for_method')
            @patch('test.api.callback.method_with_callback', ignore=Ignore([1]))
            def test_(i):
                cb_file = method_with_callback(callback_for_method, random.random())
                with open(cb_file, 'rb') as f:
                    contents = f.read()
                assert contents == ('.' * (i+1)), "Got {0} was expecting {1}".format(contents, ('.' * (i+1)))
            test_(i)
            os._exit(0)
        for i in range(2):
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                do_it(i)
        with open(CACHED_METHOD_FILE, 'rb') as f:
            assert f.read() == '.'
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| buzzfeed/caliendo | test/test_replay.py | Python | mit | 2,701 |
import datetime
import httplib
import urllib
import os.path
import csv
import time
from datetime import timedelta
import pandas as pd
import numpy as np
def isfloat(value):
    """Return True when *value* can be parsed as a float."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def totimestamp(dt, epoch=datetime.date(1970,1,1)):
    """Return the number of seconds between *dt* and the Unix epoch date."""
    delta = dt - epoch
    # Build the total in microseconds, then scale down to whole seconds.
    micros = delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6
    return micros / 10**6
class stockImport(object):
    """Downloads daily TWSE market CSVs and reshapes them into per-stock
    files under bystock/, plus helpers to extract training windows.

    NOTE(review): this module is Python 2 (print statements, httplib,
    urllib.urlencode) and uses the long-removed pandas
    DataFrame.from_csv API — verify the runtime before reuse.
    """

    def __init__(self
        ):
        print ('setup stock importer')

    def saveDate(self, date=None):
        # Record the last successfully converted date so a rerun can resume.
        date_str = date.strftime("%m/%d/%Y")
        print('{} finished'.format(date_str))
        f = open('./twstock.tmp', 'w')
        f.write(date_str)

    def loadDate(self):
        # Resume point persisted by saveDate(); defaults to 2010-01-01 16:00
        # (market close) when no checkpoint file exists.
        try:
            f = open('./twstock.tmp', 'r')
            date_str = f.readline()
            #default set to 4 PM
            return datetime.datetime.strptime(date_str + " 16:00:00", "%m/%d/%Y %H:%M:%S")
        except IOError:
            return datetime.datetime.strptime("1/1/2010 16:00:00", "%m/%d/%Y %H:%M:%S")

    def downloadData(self):
        """Fetch every missing daily CSV from www.twse.com.tw into data/."""
        start_day = datetime.date(2004, 2, 11);
        today = datetime.date.today()
        one_day = timedelta(days=1)
        y, m, d, h, min, sec, wd, yd, i = datetime.datetime.now().timetuple()
        end_time = today
        # After 4 PM today's data should be published, so include today.
        if h > 16:
            end_time = today + one_day
        print "start download missing data"
        print "checking from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
        print "checking end time " + end_time.strftime("%Y-%m-%d")
        download_date = start_day
        while download_date < end_time:
            file_name = "data/" + download_date.strftime("%Y%m%d") + ".csv"
            # Skip dates that were already downloaded.
            if os.path.isfile(file_name):
                download_date += one_day
                continue
            httpreq = httplib.HTTPConnection('www.twse.com.tw')
            #http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=20170526&type=ALL
            #headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
            print
            # ROC calendar date string (year minus 1911), kept from the old
            # POST-based endpoint.
            date_str = str(download_date.year - 1911 ) + download_date.strftime("/%m/%d")
            form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
            #httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
            full_url = "exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL"
            print full_url
            httpreq.request("GET", "http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL");
            httpres = httpreq.getresponse()
            stock_csv = httpres.read()
            print "downloading " + file_name
            f = open(file_name, "w")
            f.write(stock_csv)
            download_date += one_day

    def insertToStock(self, stockid, row, date):
        """Append *row* (a one-row DataFrame indexed by date) to the
        per-stock CSV, creating the file on first sight of the stock."""
        try:
            date_str = date.strftime("%Y-%m-%d")
            df = pd.DataFrame.from_csv('bystock/' + stockid + '.csv')
            #check if there is a key
            df.loc[date_str].count()
            #key already exist. skip it
        except KeyError:
            #no such key. insert it
            df = pd.concat([df, row])
            df.to_csv('bystock/' + stockid + '.csv')
            #print df
        except IOError:
            print('stock id: {} not exist'.format(stockid))
            row.to_csv('bystock/' + stockid + '.csv')

    def prepcsv(self, csv):
        # Strip TWSE CSV decorations (thousands separators, quotes and the
        # leading '=' Excel guard) from every cell.
        ret = []
        for i in csv:
            tmp = i
            tmp = tmp.replace(',', '')
            tmp = tmp.replace('\'', '')
            tmp = tmp.replace('\"', '')
            tmp = tmp.replace('=', '')
            ret.append(tmp)
        return ret

    def convertCSV(self, file_path=None, date=None):
        """Parse one downloaded daily CSV and distribute its rows into the
        per-stock files, then checkpoint *date* via saveDate()."""
        print('convert csv {}'.format(file_path))
        with open(file_path, 'rb') as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
            for row in spamreader:
                if len(row) < 16:
                    #abnormal some column missing?
                    continue
                #if len(row) == 17:
                    #abnormal should not more than 16 column
                    #print(row)
                if len(row) == 17:
                    stockid=row[0].replace('=', '')
                    stockid=stockid.replace('"', '')
                    stockid=stockid.strip()
                    # Parenthesized first cells are section notes, not stocks.
                    if stockid.startswith('('):
                        continue
                    checkrow = row[11].replace(',', '')
                    checkrow = checkrow.replace('"', '')
                    checkrow = checkrow.replace('=', '')
                    if not checkrow[0].isdigit():
                        #skip column title
                        continue
                    row = self.prepcsv(row)
                    # TV: trade volume, TC: transaction count, TO: turnover.
                    TV=int(row[2])
                    TC=int(row[3])
                    TO=int(row[4])
                    # RD: price direction sign; DF: signed price difference.
                    RD=row[9]
                    if RD == '+':
                        DF=float(row[10])
                        RD=1
                    elif RD == '-':
                        DF=0-float(row[10])
                        RD=-1
                    else:
                        DF=0
                        RD=0
                    PE=float(row[15])
                    # Open/close/high/low prices; '--' etc. become None.
                    try:
                        OP=float(row[5])
                        CP=float(row[8])
                        HP=float(row[6])
                        LP=float(row[7])
                    except ValueError:
                        OP=None
                        CP=None
                        HP=None
                        LP=None
                    #print('OP:{} CP:{} HP:{} LP:{} DF:{} RD:{} TV:{} TC:{} TO:{}\n'.format( OP, CP, HP, LP, DF, RD, TV, TC, TO))
                    cols = ['OP', 'CP', 'HP', 'LP', 'DF', 'RD', 'TV', 'TC', 'TO']
                    date_index = pd.date_range(date.strftime("%m/%d/%Y"), periods=1)
                    df1 = pd.DataFrame([[OP, CP, HP, LP, DF, RD, TV, TC, TO]], columns=cols)
                    df1['date'] = date_index
                    df1 = df1.set_index(['date'])
                    #print stockid
                    #print df1
                    self.insertToStock(stockid, df1, date)
        self.saveDate(date)

    def getExpectCP(self, df, date):
        """Return the close price on *date*, or the next trading day's close
        when the market was shut; None once *date* passes today."""
        today = datetime.date.today()
        one_day = timedelta(days=1)
        if date > today:
            #print "over today"
            #print date.strftime("%Y-%m-%d")
            return None
        try:
            date_str = date.strftime("%Y-%m-%d")
            return df.loc[date_str, 'CP']
        except KeyError as e:
            # No record for this calendar day (holiday/weekend): walk forward.
            return self.getExpectCP(df, date + one_day)

    def loadTrainDataById(self, stock_id, start_date, days, expect):
        """Print whether a stock's close *expect* days past the training
        window exceeds the window's maximum close (diagnostic helper)."""
        one_day = timedelta(days=1)
        stop_date = start_date + one_day * days
        expect_date = start_date + one_day * (days + expect)
        today = datetime.date.today()
        if stop_date > today:
            return None
        try:
            start_date_str = start_date.strftime("%Y-%m-%d")
            stop_date_str = stop_date.strftime("%Y-%m-%d")
            expect_date_str = expect_date.strftime("%Y-%m-%d")
            df = pd.DataFrame.from_csv('bystock/' + stock_id + '.csv')
            print "from:" + start_date_str + " to:" + stop_date_str
            dft = df.loc[start_date_str:stop_date_str]
            #print dft.as_matrix()
            #print dft.reset_index().values
            dfcp = df.loc[start_date_str:stop_date_str, 'CP']
            #print df.loc[start_date_str:expect_date_str, 'CP']
            expcp = self.getExpectCP(df, expect_date)
            if expcp == None:
                return
            #print dfcp
            print 'max during train:' + str(dfcp.max())
            print str(expect) + ' days ' + expect_date_str + ' close price' + str(expcp)
            if expcp > dfcp.max():
                print 'up'
            else:
                print 'down'
        except KeyError as e:
            print "out of range , try next day"
        except IOError:
            print "no such stock id"

    def loadTrainDataByIdFixedRow(self, stock_id, start_date, days, expect):
        """Return (matrix, label) for one training window.

        The matrix is the first *days* records from *start_date*; the label
        is 1 when the close *expect* records later beats the window's max
        close, else 0.  Returns (None, None) when not enough data exists.
        """
        one_day = timedelta(days=1)
        stop_date = start_date + one_day * days
        expect_date = start_date + one_day * (days + expect)
        today = datetime.date.today()
        res = 0
        if stop_date > today:
            return None, None
        try:
            start_date_str = start_date.strftime("%Y-%m-%d")
            stop_date_str = stop_date.strftime("%Y-%m-%d")
            expect_date_str = expect_date.strftime("%Y-%m-%d")
            today_date_str = datetime.date.today()
            #read data frame from stock file
            df = pd.DataFrame.from_csv('bystock/' + stock_id + '.csv')
            #print "from:" + start_date_str + " to:" + stop_date_str
            #count total record from start to now, check if data record is enough
            dft = df.loc[start_date_str:today_date_str]
            if dft['CP'].count() < days + expect:
                print 'data is not enough for train or validate'
                return None, None
            #retrive enough data record days + expect days
            dft = dft[:days + expect]
            #get the expect date data record
            expcpdf = dft.tail(1)
            #print dft
            #print dft[:days]
            #print dft.as_matrix()
            #print dft.reset_index().values
            #first n days data record for training
            dfcp = dft[:days]
            #convert to matrix
            data = dfcp.as_matrix()
            #get expected close price
            expcp = expcpdf['CP'].max()
            #get max close price in training data
            tmax = dft[:days]['CP'].max()
            #print 'last price:' + str(expcpdf['CP'])
            #print 'max during train:' + str(tmax)
            #print str(expect) + ' days close price:' + str(expcp)
            if expcp > tmax:
                res = 1
                #print 'up'
            else:
                res = 0
                #print 'down'
            return data, res
        except KeyError as e:
            print "out of range , try next day"
        except IOError:
            print "no such stock id"

    def loadAllTrainDataByStock(self, stock_id, start_date, days, expect):
        """Slide the training window one day at a time from *start_date* to
        today, collecting flattened feature rows X and labels Y."""
        today = datetime.date.today()
        one_day = timedelta(days=1)
        da = start_date
        X = []
        Y = []
        while da < today:
            da = da + one_day
            d, r = self.loadTrainDataByIdFixedRow(stock_id, da, days, expect)
            if d is None:
                break
            # Flatten the window's rows into a single feature vector.
            x = sum(d.tolist(), [])
            X.append(x)
            Y.append(r)
            #print("---------------------------------------------")
            #print x
            #print("---------------------------------------------")
            #print r
        return X,Y
| kaija/tw-stock | stock.py | Python | mit | 11,215 |
import psycopg2
import urlparse
import os
def server_db():
    """Open a cursor to the Postgres database described by $DATABASE_URL."""
    urlparse.uses_netloc.append("postgres")
    url = urlparse.urlparse(os.environ["DATABASE_URL"])
    connection = psycopg2.connect(
        database=url.path[1:],
        user=url.username,
        password=url.password,
        host=url.hostname,
        port=url.port,
    )
    return connection.cursor()
def local_db():
    """Open a cursor to the local development Postgres database.

    NOTE(review): the connection parameters are empty placeholders — fill
    in host/user/password/dbname before use.
    """
    connection = psycopg2.connect(host="", user="", password="", dbname="")
    return connection.cursor()
| debasishbai/django_blog | blog/database_config.py | Python | mit | 454 |
__author__ = 'SM'

import sys
# Bug fix: a stray bare `import` statement followed here, which is a
# SyntaxError and made the whole module unloadable. It has been removed.

# Earlier tkinter-based "Hello world" experiment, kept for reference:
# from tkinter import *
# #create new window
# root = Tk()
# root.title("Hello world app")
# root.geometry('200x85')
#
# app = Frame()
# app.grid()
# lbl = Label(app, text = "Hello World!")
# lbl.grid()
#
# bttn1 = Button(app, text = "Press")
# bttn1.grid()
#
# root.mainloop()

from qt import *

a = QApplication(sys.argv)


# Our function to call when the button is clicked
def sayHello():
    """Callback invoked when the button is clicked."""
    print("Hello world")


# Instantiate the button
hellobutton = QPushButton("Say 'Hello world!'", None)
# And connect the action "sayHello" to the event "button has been clicked"
a.connect(hellobutton, SIGNAL("clicked()"), sayHello)
# The rest is known already...
a.setMainWidget(hellobutton)
hellobutton.show()
a.exec_loop()
#!/usr/bin/env python3
"""Inspect graph1.py with modulefinder and report its imports.

Reference: https://docs.python.org/3/library/modulefinder.html
"""
from modulefinder import ModuleFinder

finder = ModuleFinder()
finder.run_script('graph1.py')

print('Loaded modules:')
for name, mod in finder.modules.items():
    # Show at most three global names defined by each loaded module.
    preview = ','.join(list(mod.globalnames.keys())[:3])
    print(f'{name}: {preview}')

print('-' * 50)
print('Modules not imported:')
print('\n'.join(finder.badmodules.keys()))
| jtraver/dev | python3/graphics/modulefinder1.py | Python | mit | 416 |
"""
SUPPRESS-GO-AHEAD
This supports suppressing or activating Evennia
the GO-AHEAD telnet operation after every server reply.
If the client sends no explicit DONT SUPRESS GO-AHEAD,
Evennia will default to supressing it since many clients
will fail to use it and has no knowledge of this standard.
It is set as the NOGOAHEAD protocol_flag option.
http://www.faqs.org/rfcs/rfc858.html
"""
from builtins import object
# Telnet option code 3: SUPPRESS-GO-AHEAD (RFC 858).
SUPPRESS_GA = chr(3)
# default taken from telnet specification

# try to get the customized mssp info, if it exists.
class SuppressGA(object):
    """
    Implements the SUPPRESS-GO-AHEAD protocol (RFC 858). Add this to a variable
    on the telnet protocol to set it up.

    The NOGOAHEAD protocol flag is True when GO-AHEADs are suppressed (the
    default) and False when the client insists on receiving them.
    """

    def __init__(self, protocol):
        """
        Initialize suppression of GO-AHEADs.

        Args:
            protocol (Protocol): The active protocol instance.

        """
        self.protocol = protocol
        # default to suppressing GA until the client tells us otherwise
        self.protocol.protocol_flags["NOGOAHEAD"] = True
        # tell the client that we prefer to suppress GA ...
        self.protocol.will(SUPPRESS_GA).addCallbacks(self.do_suppress_ga, self.dont_suppress_ga)
        # ... but also accept if the client really wants not to.
        self.protocol.do(SUPPRESS_GA).addCallbacks(self.do_suppress_ga, self.dont_suppress_ga)

    def dont_suppress_ga(self, option):
        """
        Called when client requests to not suppress GA.

        Args:
            option (Option): Not used.

        """
        # Bugfix: previously this set NOGOAHEAD = True, identical to
        # do_suppress_ga, so the client's refusal was silently ignored.
        # Honoring the refusal means GO-AHEADs must be sent (flag False).
        self.protocol.protocol_flags["NOGOAHEAD"] = False
        self.protocol.handshake_done()

    def do_suppress_ga(self, option):
        """
        Client agrees to suppress GA.

        Args:
            option (Option): Not used.

        """
        self.protocol.protocol_flags["NOGOAHEAD"] = True
        self.protocol.handshake_done()
| whitehorse-io/encarnia | evennia/evennia/server/portal/suppress_ga.py | Python | mit | 1,804 |
from django.test import TestCase, Client
from jpspapp.models import Club, Activity,UserProfile
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
import datetime
# Create your tests here.
class ClubTestCase(TestCase):
    """CRUD checks for the Club model and its owning User."""

    def setUp(self):
        """Create the club owner account and one club fixture."""
        User.objects.create_user(username="clubtest", email='123@123.com', password='jp123456')
        Club.objects.create(ClubObject=User.objects.get(username='clubtest'), ClubName="测试社团", ClubId=601, Type='1',
                            ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1', ShezhangClass='1',
                            IfRecruit=True, EnrollGroupQq='12345678')

    def test_club_update(self):
        """A changed ShezhangName persists after save()."""
        club = Club.objects.get(ClubName="测试社团")
        club.ShezhangName = "社长姓名"
        club.save()
        self.assertEqual(club.ShezhangName, "社长姓名")

    def test_club_del(self):
        # bugfix: parameter was misspelled `selfs` (worked only by accident,
        # since the framework passes the instance positionally)
        """Club and its owner user can be deleted without errors."""
        club = Club.objects.get(ClubName="测试社团")
        club.delete()
        user = User.objects.get(username="clubtest")
        user.delete()
class ActivityModelTest(TestCase):
    """CRUD checks for the Activity model (plus its Club/User fixtures)."""

    def setUp(self):
        """Create a user, a club owned by that user, and one activity."""
        owner = User.objects.create_user(
            username="clubtest", email='123@123.com', password='jp123456')
        club = Club.objects.create(
            ClubObject=owner, ClubName="测试社团", ClubId=601, Type='1',
            ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1',
            ShezhangClass='1', IfRecruit=True, EnrollGroupQq='12345678')
        Activity.objects.create(
            Name="活动名称", Region="活动地点", ClubObject=club,
            Content="活动内容", Date1=datetime.datetime.now(),
            Date2=datetime.datetime.now() + datetime.timedelta(days=1),
            State='0', Type='普通')

    def test_update(self):
        """A changed Content field persists after save()."""
        record = Activity.objects.get(Name="活动名称")
        record.Content = "活动内容测试"
        record.save()
        self.assertEqual(record.Content, '活动内容测试')

    def test_delete(self):
        """Fixtures delete cleanly from child to parent."""
        Activity.objects.get(Region="活动地点").delete()
        Club.objects.get(ShezhangName='社长').delete()
        User.objects.get(username='clubtest').delete()
class UserProfileModelTest(TestCase):
    """CRUD checks for the UserProfile model."""

    def setUp(self):
        """Create a user and its profile fixture."""
        # Consistency fix: use create_user (as the other test cases do) so the
        # password is hashed; plain objects.create() stored the raw string in
        # the password field.
        User.objects.create_user(username='userprofiletest', email='123@123.com', password='jp123456')
        UserProfile.objects.create(UserObject=User.objects.get(username='userprofiletest'),UserName='测试用户',Class=1,Grade=1,AttendYear='2017',QQ='12345678',Phone='12345678901',Email='123@123.com')

    def test_update(self):
        """A changed Class field persists after save()."""
        user = UserProfile.objects.get(UserName='测试用户')
        user.Class = 2
        user.save()
        self.assertEqual(user.Class, 2)

    def test_delete(self):
        """The profile can be deleted without errors."""
        user = UserProfile.objects.get(UserName='测试用户')
        user.delete()
class UserModelTest(TestCase):
    """Placeholder suite for User model coverage; no assertions yet.

    Note: these helpers are not named test_*, so the runner never calls them.
    """

    def create(self):
        pass

    def update(self):
        # bugfix: parameter was misspelled `selfs`
        pass

    def delete(self):
        pass
class PostModelTest(TestCase):
    """Placeholder suite for Post model coverage; no assertions yet."""

    def test(self):
        pass

    def update(self):
        # bugfix: parameter was misspelled `selfs`
        pass

    def delete(self):
        pass
| AlienStudio/jpsp_python | jpsp/jpspapp/tests.py | Python | mit | 3,241 |
from __future__ import print_function
import os
import json
import base64
import psycopg2
# PostgreSQL connection settings, overridable via environment variables.
# NOTE(review): 172.17.0.1 looks like a Docker bridge address — confirm.
pg_host = os.getenv('PGHOST', "172.17.0.1")
pg_user = os.getenv('PGUSER', "postgres")
pg_password = os.getenv('PGPASSWORD', "root")
pg_database = os.getenv('PGDATABASE', "db_server")
pg_port = os.getenv('PGPORT', "5432")
print('Loading function:' + pg_host)
def createOperation(row):
    """Translate a change record into a SQL statement string.

    Args:
        row (dict): Change record with keys "operation" ("insert", "update"
            or "delete"), "table" (target table name) and "payload"
            (column -> value mapping; must contain "id" for update/delete).

    Returns:
        str or None: The SQL statement, or None for an unknown operation.

    NOTE(review): values are concatenated directly into the SQL text, so this
    is vulnerable to SQL injection and breaks on strings containing quotes.
    Prefer psycopg2 parameterized queries (execute(sql, params)) if payloads
    can be untrusted.
    """
    sql = None
    operation = row["operation"]
    if operation == "insert":
        keys = []
        pairs = []
        for key, value in row["payload"].items():
            keys.append(key)
            if isinstance(value, dict):
                # Nested JSON objects are stored as quoted JSON text.
                # (bugfix: `type(value) is object` was never true for dicts,
                # so nested objects fell through to str() and produced
                # invalid SQL)
                pairs.append("'" + json.dumps(value) + "'")
            elif isinstance(value, str):
                pairs.append("'" + value + "'")
            else:
                pairs.append(str(value))
        columns = ','.join(keys)
        values = ','.join(pairs)
        sql = "INSERT INTO " + row["table"] + " (" + columns + ") VALUES (" + values + ")"
    elif operation == "update":
        pairs = []
        for key, value in row["payload"].items():
            if key == "id":
                # id selects the row in WHERE; never part of SET
                continue
            if isinstance(value, dict):
                pairs.append(key + "='" + json.dumps(value) + "'")
            elif isinstance(value, str):
                pairs.append(key + "='" + value + "'")
            else:
                pairs.append(key + "=" + str(value))
        values = ','.join(pairs)
        payload_id = row["payload"]["id"]
        sql = "UPDATE " + row["table"] + " SET " + values + " WHERE id=" + str(payload_id)
    elif operation == "delete":
        payload_id = row["payload"]["id"]
        sql = "DELETE FROM " + row["table"] + " WHERE id=" + str(payload_id)
    return sql
# Open one shared connection using the env-configured credentials above.
pg_conn = psycopg2.connect("user='" + pg_user + "' \
host='" + pg_host + "' password='" + pg_password + "' dbname='" + pg_database + "' port=" + pg_port)
print("Connection done: " + pg_database)
# Sample change records covering all three supported operations.
Records = [{
    "table": "table_core_msisdns",
    "timestamp": 1503171224178,
    "operation": "insert",
    "payload": {
        "id": 37699,
        "membership_no": "Z-1534328463-1",
        "msisdn": "1913263343"
    }
}, {
    "table": "table_core_msisdns",
    "timestamp": 1503171224178,
    "operation": "update",
    "payload": {
        "id": 37699,
        "membership_no": "Z-1534328464-1",
        "msisdn": "1913263343"
    }
}, {
    "table": "table_core_msisdns",
    "timestamp": 1503171224178,
    "operation": "delete",
    "payload": {
        "id": 37699
    }
}]
# Replay each record against the database: commit per statement, roll back
# on failure so one bad record does not poison the connection state.
for record in Records:
    # round-trip through JSON (mimics receiving the record off the wire)
    payload = json.dumps(record)
    print("Decoded payload: " + payload)
    row = json.loads(payload)
    sql = createOperation(row)
    if sql is not None:
        print(sql)
        try:
            cur = pg_conn.cursor()
            cur.execute(sql)
            cur.close()
            pg_conn.commit()
            print("Succeed")
        except Exception as ex:
            pg_conn.rollback()
            print(ex.message)  # NOTE(review): ex.message is Python-2-only
| mdmamunhasan/pgsync | lmdtest.py | Python | mit | 3,012 |
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator wiring `scattercarpet.hoverlabel` to its
    generated Hoverlabel data class.

    NOTE(review): this file matches plotly's auto-generated validator
    layout — manual edits here are likely to be overwritten by codegen.
    """

    def __init__(self, plotly_name="hoverlabel", parent_name="scattercarpet", **kwargs):
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on Chart Studio Cloud
                for `align`.
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on Chart Studio Cloud
                for `bgcolor`.
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on Chart Studio Cloud
                for `bordercolor`.
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on Chart Studio Cloud
                for `namelength`.
""",
            ),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/scattercarpet/_hoverlabel.py | Python | mit | 2,062 |
"""Tests if it "compiles" (except that it doesn't).
Also: trivial usage example.
"""
from da import Node, Network
import topo
class MyNode(Node):
    """Trivial example node: announces its ID, collects two messages,
    then logs the last one and terminates."""

    def run(self):
        # send own ID twice — NOTE(review): assuming the first argument of
        # send() addresses a neighbor/port; confirm against the `da` package
        self.send(0, self.ID)
        self.send(1, self.ID)
        # block until two messages arrive; (p, m) presumably (peer, message)
        p, m = self.recv()
        p, m = self.recv()
        self.log('terminating', m, p)
def run(n):
    """Build a Network of MyNode over the topology topo.C(n) and run it."""
    Network(MyNode, topo.C(n)).run()
if __name__ == '__main__':
    # script entry point: run the example emulation with 47 nodes
    run(47)
| AnotherKamila/distributed-algorithms-emulator | examples/test.py | Python | mit | 424 |
import os
from inspect import signature
from . import db
from . import settings
class Entry:
    """An entry or a comment (entry_id set => comment) plus helpers for
    locating where its media file should be stored locally."""

    def __init__(self, id_=None, author=None, date=None, body=None, body_html=None, url=None,
                 plus=None, media_url=None, tags=None, is_nsfw=None, entry_id=None, type_=None):
        self.id_ = id_
        self.author = author
        self.date = date
        self.body = body
        self.body_html = body_html
        self.url = url
        self.plus = plus
        self.media_url = media_url
        self.tags = tags
        self.is_nsfw = is_nsfw
        self.entry_id = entry_id  # only for comment
        self.type_ = type_

    def __iter__(self):
        # iterating an Entry yields its main attribute values (see attrs_gen)
        return self.attrs_gen()

    def attrs_gen(self):
        """Yield the first 11 constructor attributes, i.e. everything up to
        and including entry_id; type_ is deliberately excluded."""
        attrs = list(signature(self.__init__).parameters.keys())  # attributes from __init__()
        return (getattr(self, attr) for attr in attrs[:11])

    def __str__(self):
        """'<entry_id>_<id>' for comments, '<id>' for plain entries."""
        if self.entry_id:
            return '{}_{}'.format(self.entry_id, self.id_)
        return str(self.id_)

    def download_info(self):
        """Return the minimal dict the downloader needs for this entry."""
        return {
            'id_': self.__str__(),
            'media_url': self.media_url,
            'is_nsfw': self.is_nsfw,
            'local_file_path': self.local_file_path,
        }

    @property
    def comments_count(self):
        # DB lookup; returns None when self is itself a comment
        if not self.entry_id:  # if entry_id is not none it's a comment
            return db.DB.count_comments(self.id_)

    @property
    def media_ext(self):
        """File extension of media_url, or None when there is no media_url.

        Extensions carrying URL query noise ('?...') are trimmed; gfycat
        links without an extension are assumed to be '.webm'.
        """
        if self.media_url:
            _, ext = os.path.splitext(self.media_url)
            if len(ext) > 4 and '?' in ext:  # fix for urls with '?'
                ext = ext.split('?')[0]
            elif not ext and 'gfycat.com' in self.media_url:
                ext = '.webm'
            return ext
        else:
            return None

    @property
    def local_file_path(self):
        """Relative path where the media file is stored: NSFW entries go to a
        dedicated subdirectory, comments to another; '' when there is no
        downloadable media (no url or no recognizable extension)."""
        path = settings.FILES_DIR_NAME
        ext = self.media_ext
        if self.media_url and ext:
            if self.is_nsfw:
                path = os.path.join(path, settings.NSFW_DIR_NAME)
            if self.entry_id:  # it's a comment
                return os.path.join(path, settings.COMMENTS_DIR_NAME,
                                    '{}_{}{}'.format(self.entry_id, self.id_, ext))
            return os.path.join(path, '{}{}'.format(self.id_, ext))
        return ''
| kosior/taktyk | taktyk/entry.py | Python | mit | 2,324 |
class PluginBase(object):
    """Base class for terminal plugins: keeps a registry of callable method
    names (with help text) and provides helpers for parsing command-line
    style input into positional args and kwargs."""

    name = ''
    doc = 'doc about this class'
    # subclasses add their own {method_name: doc string} entries here
    methods_subclass = {}

    def __init__(self, **kwargs):
        # registry of dispatchable method names -> help text; the two
        # built-ins are always present, extended by the subclass dict
        self.methods = {
            'help': 'doc about help method',
            'get_methods': 'doc about get_methods method'
        }
        self.methods.update(self.methods_subclass)

    def on_import(self, term_system):
        """Hook called when the plugin is loaded by the terminal system."""
        pass

    def get_methods(self):
        """Return the list of registered method names."""
        return [key for key in self.methods]

    def help(self, method_name):
        """Return the registered doc for method_name[0] (a sequence whose
        first element is the method name), or a not-found message."""
        doc = self.methods.get(method_name[0], None)
        if doc:
            ret = doc
        else:
            ret = '# %s: %s: %s not found' % (
                self.name, 'help', method_name)
        return ret

    @staticmethod
    def get_args_kwargs_from_text(text):
        """Split a raw command string into (args_list, kwargs_dict).

        Quoted substrings (single or double quotes) are kept intact as one
        argument; unquoted tokens containing '=' become kwargs (values are
        kept as raw strings).
        """
        start_str = (None, -1)  # (opening quote char, its index), or (None, -1)
        strings_found = []      # (start, end) spans of quoted substrings
        kwargs_found = {}
        args_found = []
        for i, char in enumerate(text):
            if char in ("'", '"'):
                if start_str[0]:
                    if char == start_str[0]:
                        # closing quote: back-scan for the preceding space so
                        # any prefix glued to the quote stays in the same arg
                        # NOTE(review): index arithmetic assumed correct; verify
                        rev = text[:i+1][::-1]
                        b = rev[i+1 - start_str[1]:].find(' ')
                        if b != -1:
                            strings_found.append((start_str[1] - b, i+1))
                        else:
                            strings_found.append((start_str[1], i+1))
                        start_str = (None, -1)
                else:
                    start_str = (char, i)
        if strings_found:
            # stitch alternating unquoted/quoted regions into args
            last_end = 0
            for start, end in strings_found:
                before = text[last_end:start]
                for x in before.split(' '):
                    if x:
                        args_found.append(x)
                args_found.append(text[start:end])
                last_end = end
            # trailing region after the last quoted span (uses loop var `end`)
            for x in text[end:].split(' '):
                if x:
                    args_found.append(x)
        else:
            args_found = text.split(' ')
        # second pass: pull key=value tokens out into kwargs
        remlist = []
        for i, x in enumerate(args_found):
            a = x.find('=')
            if a != -1:
                yes = False
                c = x.find("'")
                b = x.find('"')
                if b == -1 and c == -1:
                    yes = True
                else:
                    # only treat as a kwarg if '=' occurs before any quote
                    # NOTE(review): when only a single-quote is present
                    # (b == -1, c != -1) `start` stays -1 — looks buggy; verify
                    start = b
                    if c != -1 and c < b:
                        start = c
                    a = x[:start].find('=')
                    if a != -1:
                        yes = True
                if yes:
                    kwargs_found[x[:a]] = x[a+1:]
                    remlist.append(i)
        # delete consumed tokens back-to-front so indices stay valid
        for x in reversed(remlist):
            del args_found[x]
        return args_found, kwargs_found

    @staticmethod
    def get_from_locals_globals(term_system, text):
        """Look up *text* in the terminal's exec locals, then its globals."""
        ret = term_system.exec_locals.get(text, None)
        if not ret:
            ret = term_system.get_globals().get(text, None)
        return ret

    @staticmethod
    def slice_fname(text):
        """Split *text* into (first_token, remainder_after_first_space)."""
        fname = ''
        text2 = ''
        args = ''  # NOTE(review): unused local
        if text:
            b = text.find(' ')
            if b != -1:
                text2 = text[b+1:]
                fname = text[:b]
            else:
                fname = text
        return fname, text2

    @staticmethod
    def get_method_args(text):
        """Space-split *text* into (plugin_name, method_name, args_tuple)."""
        fname = ''
        method = ''
        args = []
        if text:
            aspl = text.split(' ')
            fname = aspl[0]
            if len(aspl) > 1:
                method = aspl[1]
            if len(aspl) > 2:
                args = aspl[2:]
        return fname, method, tuple(args)

    @staticmethod
    def get_method_args_kwargs(text):
        """Like get_method_args, but the tail after the method name is parsed
        with get_args_kwargs_from_text so kwargs are extracted too."""
        fname, method, args, kwargs = '', '', [], {}
        if text:
            aspl = text.split(' ')
            fname = aspl[0]
            if len(aspl) > 1:
                method = aspl[1]
            if len(aspl) > 2:
                args, kwargs = PluginBase.get_args_kwargs_from_text(
                    ' '.join(aspl[2:]))
        return fname, method, tuple(args), kwargs

    def handle_input(self, term_system, term_globals, exec_locals, text):
        """Dispatch *text* to one of this plugin's registered methods.

        Returns whatever the method returns, or an error string listing the
        available methods when the named method is unknown."""
        fname, method, args, kwargs = self.get_method_args_kwargs(text)
        found = False
        if method in self.methods:
            m = getattr(self, method, None)
            if m:
                found = True
                if args and kwargs:
                    result = m(*args, **kwargs)
                elif args:
                    result = m(*args)
                elif kwargs:
                    result = m(**kwargs)
                else:
                    result = m()
        if not found:
            result = (
                '# %s: Method "%s" not found\n'
                '# Available methods are %s\n'
                '# Type "help [method_name]" for help') % (
                self.name, method, self.get_methods())
        return result
| Bakterija/mmplayer | mmplayer/kivy_soil/terminal_widget/plugins/_base.py | Python | mit | 4,885 |
from flask import Flask
# WSGI application instance; configuration is loaded from the project-local
# DevelopmentConfig class via its dotted import path.
app = Flask(__name__)
app.config.from_object("configs.appconfig.DevelopmentConfig")
| oyang/testFalsk | app.py | Python | mit | 109 |