| repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
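
The statistics columns above are not documented in the source; assuming the obvious reading of their names (mean and maximum line length of `content`, fraction of alphabetic characters), they could be reproduced per row with a sketch like this:

```python
# Sketch of the assumed derivation of the per-row statistics; the column
# semantics are inferred from their names, not documented in the source.
def content_stats(content):
    lines = content.splitlines()
    return {
        "line_mean": sum(len(line) for line in lines) / float(len(lines)),
        "line_max": max(len(line) for line in lines),
        "alpha_frac": sum(ch.isalpha() for ch in content) / float(len(content)),
    }
```
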
mitodl/ccxcon | courses/models_test.py | 1 | 2788 |
"""
Tests for Models
"""
import json
from django.test import TestCase
from django.contrib.auth.models import User
from .factories import CourseFactory, ModuleFactory
from courses.models import Course, Module, UserInfo
# pylint: disable=no-self-use
class CourseTests(TestCase):
"""
Tests for Course
"""
def test_tostring(self):
"""
Test behavior of str(Course)
"""
assert str(Course(title='test')) == 'test'
def test_towebhook(self):
"""
test to_webhook implementation returns valid json object
"""
course = CourseFactory.create()
out = course.to_webhook()
json.dumps(out) # Test to ensure it's json dumpable.
ex_pk = out['external_pk']
assert out['instance'] == course.edx_instance.instance_url
assert out['course_id'] == course.course_id
assert out['author_name'] == course.author_name
assert out['overview'] == course.overview
assert out['description'] == course.description
assert out['image_url'] == course.image_url
assert out['instructors'] == [str(instructor) for instructor in course.instructors.all()]
assert isinstance(ex_pk, str)
assert '-' in ex_pk
class ModuleTests(TestCase):
"""
Tests for Module
"""
def test_tostring(self):
"""
Test behavior of str(Module)
"""
assert str(Module(title='test')) == 'test'
def test_ordering(self):
"""
Test module ordering is by course/order.
"""
c1 = CourseFactory.create()
c2 = CourseFactory.create()
# Intentionally created out of course/order sequence so we can validate that
# ordering is not by id.
m10 = ModuleFactory.create(course=c1, order=0)
m21 = ModuleFactory.create(course=c2, order=1)
m20 = ModuleFactory.create(course=c2, order=0)
m11 = ModuleFactory.create(course=c1, order=1)
result = [x.id for x in Module.objects.all()]
assert result == [m10.id, m11.id, m20.id, m21.id]
def test_towebhook(self):
"""
test to_webhook implementation returns valid json object
"""
module = ModuleFactory.build()
web_out = module.to_webhook()
json.dumps(web_out) # Test to ensure it's json dumpable.
assert web_out['instance'] == module.course.edx_instance.instance_url
for k in ('external_pk', 'course_external_pk'):
assert isinstance(web_out[k], str)
assert '-' in web_out[k]
class UserInfoTests(TestCase):
"""
Tests for UserInfo
"""
def test_tostring(self):
"""
Test behavior of str(UserInfo)
"""
assert str(UserInfo(user=User(username='test'))) == 'Profile for test'
| agpl-3.0 | 3,154,952,932,236,635,600 | 29.977778 | 97 | 0.602582 | false | 3.965861 | true | false | false |
brianmckenna/sci-wms | wmsrest/views.py | 1 | 4583 |
# -*- coding: utf-8 -*-
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from wms.models import Dataset, Layer, VirtualLayer, Variable
from wmsrest.serializers import DatasetSerializer, SGridDatasetSerializer, UGridDatasetSerializer, RGridDatasetSerializer, LayerSerializer, VirtualLayerSerializer, VariableSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from django.http import Http404
class DatasetList(APIView):
"""
List all datasets, or create a new dataset.
"""
def get(self, request, format=None):
snippets = Dataset.objects.select_related().all()
serializer = DatasetSerializer(snippets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
if 'ugrid' in request.data['type']:
request.data['type'] = 'wms.ugriddataset'
serializer = UGridDatasetSerializer(data=request.data)
elif 'sgrid' in request.data['type']:
request.data['type'] = 'wms.sgriddataset'
serializer = SGridDatasetSerializer(data=request.data)
elif 'rgrid' in request.data['type']:
request.data['type'] = 'wms.rgriddataset'
serializer = RGridDatasetSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DatasetDetail(APIView):
"""
Get or update a specific Sci-WMS dataset.
Supports GET, PUT, DELETE, and PATCH methods.
A DELETE on a dataset with a defined m2m relationship
to another table will also delete that relationship.
PUT and PATCH requests with defined m2m relations
to another table will update those relations accordingly.
"""
permission_classes = (IsAuthenticatedOrReadOnly,)
queryset = Dataset.objects.all()
serializer_class = DatasetSerializer
def get_object(self, pk):
try:
return Dataset.objects.get(pk=pk)
except Dataset.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
dataset = self.get_object(pk)
serializer = DatasetSerializer(dataset)
return Response(serializer.data)
def put(self, request, pk, format=None):
dataset = self.get_object(pk)
if 'ugrid' in request.data['type']:
request.data['type'] = 'wms.ugriddataset'
serializer = UGridDatasetSerializer(dataset, data=request.data)
elif 'sgrid' in request.data['type']:
request.data['type'] = 'wms.sgriddataset'
serializer = SGridDatasetSerializer(dataset, data=request.data)
elif 'rgrid' in request.data['type']:
request.data['type'] = 'wms.rgriddataset'
serializer = RGridDatasetSerializer(dataset, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
dataset = self.get_object(pk)
dataset.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class LayerDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = LayerSerializer
queryset = Layer.objects.all()
class VirtuallLayerDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = VirtualLayerSerializer
queryset = VirtualLayer.objects.all()
class DefaultDetail(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = VariableSerializer
queryset = Variable.objects.all()
class DefaultList(APIView):
"""
List all variable defaults, or create a new one.
"""
def get(self, request, format=None):
snippets = Variable.objects.all()
serializer = VariableSerializer(snippets, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = VariableSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| gpl-3.0 | -4,777,197,042,386,780,000 | 36.876033 | 182 | 0.690814 | false | 4.279178 | false | false | false |
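
`DatasetList` and `DatasetDetail` above are plain `APIView` subclasses, so they need explicit URL routing. A minimal sketch of such wiring follows; the URL patterns and route names are assumptions for illustration, not taken from the sci-wms source:

```python
# Hypothetical urls.py wiring for the views above (paths and names are assumptions).
from django.conf.urls import url
from wmsrest import views

urlpatterns = [
    url(r'^datasets/$', views.DatasetList.as_view(), name='dataset-list'),
    url(r'^datasets/(?P<pk>[0-9]+)/$', views.DatasetDetail.as_view(), name='dataset-detail'),
    url(r'^layers/(?P<pk>[0-9]+)/$', views.LayerDetail.as_view(), name='layer-detail'),
    url(r'^defaults/$', views.DefaultList.as_view(), name='default-list'),
]
```
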
uclapi/uclapi | backend/uclapi/timetable/migrations/0010_auto_20190220_1835.py | 1 | 3406 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-20 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetable', '0009_coursea_courseb'),
]
operations = [
migrations.AlterField(
model_name='modulegroupsa',
name='csize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='estsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='groupnum',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='maxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='mequivid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='minsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='parentkey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='prefmaxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsa',
name='thiskey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='csize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='estsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='groupnum',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='maxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='mequivid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='minsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='parentkey',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='prefmaxsize',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulegroupsb',
name='thiskey',
field=models.IntegerField(blank=True, null=True),
),
]
| mit | 4,278,143,463,914,907,600 | 31.438095 | 61 | 0.551674 | false | 4.553476 | false | false | false |
AlexanderSavelyev/rdkit | rdkit/Chem/test_list.py | 1 | 2106 |
tests=[
("python","UnitTestChem.py",{}),
("python","UnitTestChemv2.py",{}),
("python","UnitTestChemAtom.py",{}),
("python","UnitTestChemBond.py",{}),
("python","UnitTestChemSmarts.py",{}),
("python","UnitTestFragmentDescriptors.py",{}),
("python","UnitTestGraphDescriptors.2.py",{}),
("python","UnitTestLipinski.py",{}),
("python","MCS.py",{}),
("python","UnitTestMCS.py",{}),
("python","UnitTestOldBugs.py",{}),
("python","UnitTestSATIS.py",{}),
("python","UnitTestSmiles.py",{}),
("python","UnitTestSuppliers.py",{}),
("python","UnitTestSurf.py",{}),
("python","UnitTestMol3D.py",{}),
("python","FragmentMatcher.py",{}),
("python","MACCSkeys.py",{}),
("python","Descriptors.py",{}),
("python","UnitTestCatalog.py",{}),
("python","TemplateAlign.py",{}),
("python","Recap.py",{}),
("python","BRICS.py",{}),
("python","UnitTestDescriptors.py",{}),
("python","AllChem.py",{}),
("python","PropertyMol.py",{}),
("python","UnitTestInchi.py",{}),
("python","SaltRemover.py",{}),
("python","UnitTestFunctionalGroups.py",{}),
("python","UnitTestCrippen.py",{}),
("python","__init__.py",{}),
("python","PandasTools.py",{}),
("python","test_list.py",{'dir':'AtomPairs'}),
("python","test_list.py",{'dir':'ChemUtils'}),
("python","test_list.py",{'dir':'EState'}),
("python","test_list.py",{'dir':'FeatMaps'}),
("python","test_list.py",{'dir':'Fingerprints'}),
("python","test_list.py",{'dir':'Pharm2D'}),
("python","test_list.py",{'dir':'Pharm3D'}),
#("python","test_list.py",{'dir':'Subshape'}),
("python","test_list.py",{'dir':'Suppliers'}),
("python","test_list.py",{'dir':'Scaffolds'}),
("python","test_list.py",{'dir':'Draw'}),
("python","test_list.py",{'dir':'Fraggle'}),
("python","test_list.py",{'dir':'SimpleEnum'}),
]
longTests=[
("python","UnitTestArom.py",{}),
("python","UnitTestGraphDescriptors.2.py -l",{}),
("python","UnitTestSurf.py -l",{}),
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
| bsd-3-clause | -6,715,822,941,456,160,000 | 32.967742 | 57 | 0.583571 | false | 2.929068 | true | true | false |
bjamesv/pyweatherviz | daily_json_to_dict.py | 1 | 4201 |
import api_info
from dateutil.parser import parse
import requests
import json
import pandas as pd
import logging
map_ghcn_by_date_tuple = {}
#dictionary, caching fully downloaded/parsed GHCN in memory
def get_ncei_daily_climate_dicts( date_start, date_xend):
"""
obtain daily Global Historical Climatology Network data, via disk cache
or NCEI web API registered developer token.
"""
# get climate dict from this module's in-memory cache
requested_period = (date_start,date_xend)
try:
ghcn_rows = map_ghcn_by_date_tuple[ requested_period ]
logging.info('Using inmemory NCEI data: {}'.format(requested_period))
except KeyError:
# fall back to disk cache, or NCEI RESTful api
list_raw_dicts = _get_list_ncei_daily_climate( date_start, date_xend)
# build dicts, & return the collection.
ghcn_rows = _get_daily_climate_dicts( list_raw_dicts)
# add to module's in-memory cache
map_ghcn_by_date_tuple[ requested_period] = ghcn_rows
return ghcn_rows
def _get_list_ncei_daily_climate( date_start, date_xend):
"""
returns collection of dicts, representing raw daily Global Historical
Climatology Network data.
"""
token = {'Token': api_info.key }
url = "https://www.ncdc.noaa.gov/cdo-web/api/v2/data?\
datasetid=GHCND&stationid=GHCND:USC00205567\
&startdate={start}&enddate={xend}\
&limit=1000"
dict_range={
'start': "{:%Y-%m-%d}".format( date_start)
,'xend' : "{:%Y-%m-%d}".format( date_xend)
}
file_cache = 'daily_json_{start}_{xend}.json'.format( **dict_range)
try:
cache = open( file_cache)
logging.info('Opening local NCEI cache: ({})'.format(file_cache))
list_json_response = json.load( cache)
except FileNotFoundError:
url_req = url.format( **dict_range)
msg = 'Local NCEI cache ({}) not found, downloading: ({})'
logging.info(msg.format(file_cache,url_req))
# default requests behavior for connect timeout (infinite wait?) was no
# good on a poorly configured IPv6 network (many, dead routes)
max_s = (5,45) #docs.python-requests.org/en/latest/user/advanced/#timeouts
list_json_response = requests.get( url_req, headers=token, timeout=max_s).json().get('results')
json.dump( list_json_response, open( file_cache, 'w'))
return list_json_response
def _get_daily_climate_dicts( list_daily_climate):
"""
returns collection of dicts, each representing one day of daily Global
Historical Climatology Network data.
>>> l = [{'date':'2013-01-01T00:00:00','datatype':'TMAX','value':25}\
,{'date':'2013-01-01T00:00:00','datatype':'SNWD','value':175}\
,{'date':'2013-01-01T00:00:00','datatype':'PRCP','value':90}]
>>> out = _get_daily_climate_dicts( l)
>>> from pprint import pprint
>>> pprint( out)
[{'DATE': datetime.datetime(2013, 1, 1, 0, 0),
'PRCP_MM': 9.0,
'SNWD_MM': 175,
'TMAX_C': 2.5}]
"""
list_one_row_per_day = []
df_by_date = pd.DataFrame(list_daily_climate).groupby('date')
for str_group in df_by_date.groups.keys():
# build dict - add date
dict_day = {'DATE': parse(str_group)}
# extract TMAX
df_day = df_by_date.get_group( str_group)
if 'TMAX' in df_day.datatype.values:
tmax_tenth_degC = df_day[ df_day.datatype == 'TMAX'].value
dict_day['TMAX_C'] = int(tmax_tenth_degC) / 10
# extract TMIN
if 'TMIN' in df_day.datatype.values:
tmin_tenth_degC = df_day[ df_day.datatype == 'TMIN'].value
dict_day['TMIN_C'] = int(tmin_tenth_degC) / 10
# extract snow depth in mm
dict_day['SNWD_MM'] = 0
if 'SNWD' in df_day.datatype.values:
dict_day['SNWD_MM'] = int(df_day[ df_day.datatype == 'SNWD'].value)
# extract precipitation in mm
dict_day['PRCP_MM'] = 0
if 'PRCP' in df_day.datatype.values:
tenth_mm = int(df_day[ df_day.datatype == 'PRCP'].value)
dict_day['PRCP_MM'] = tenth_mm / 10
# add dict to list
list_one_row_per_day.append( dict_day)
return list_one_row_per_day
| gpl-3.0 | 8,346,215,010,290,300,000 | 40.594059 | 101 | 0.620567 | false | 3.158647 | false | false | false |
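
A possible end-to-end use of the module above, shown only as a sketch: the date range is made up, and a valid NCEI token is assumed to be configured in `api_info.key`.

```python
# Hypothetical usage of daily_json_to_dict; dates are arbitrary, api_info.key required.
import datetime
import pandas as pd
from daily_json_to_dict import get_ncei_daily_climate_dicts

rows = get_ncei_daily_climate_dicts(datetime.date(2013, 1, 1), datetime.date(2013, 2, 1))
df = pd.DataFrame(rows).set_index('DATE').sort_index()
print(df.head())
```
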
ContinuumIO/dask | dask/dataframe/optimize.py | 2 | 4215 |
""" Dataframe optimizations """
import operator
from dask.base import tokenize
from ..optimization import cull, fuse
from .. import config, core
from ..highlevelgraph import HighLevelGraph
from ..utils import ensure_dict
from ..blockwise import optimize_blockwise, fuse_roots, Blockwise
def optimize(dsk, keys, **kwargs):
if isinstance(dsk, HighLevelGraph):
# Think about an API for this.
flat_keys = list(core.flatten(keys))
dsk = optimize_read_parquet_getitem(dsk, keys=flat_keys)
dsk = optimize_blockwise(dsk, keys=flat_keys)
dsk = fuse_roots(dsk, keys=flat_keys)
dsk = ensure_dict(dsk)
if isinstance(keys, list):
dsk, dependencies = cull(dsk, list(core.flatten(keys)))
else:
dsk, dependencies = cull(dsk, [keys])
fuse_subgraphs = config.get("optimization.fuse.subgraphs")
if fuse_subgraphs is None:
fuse_subgraphs = True
dsk, dependencies = fuse(
dsk, keys, dependencies=dependencies, fuse_subgraphs=fuse_subgraphs,
)
dsk, _ = cull(dsk, keys)
return dsk
def optimize_read_parquet_getitem(dsk, keys):
# find the keys to optimize
from .io.parquet.core import ParquetSubgraph
read_parquets = [k for k, v in dsk.layers.items() if isinstance(v, ParquetSubgraph)]
layers = dsk.layers.copy()
dependencies = dsk.dependencies.copy()
for k in read_parquets:
columns = set()
update_blocks = {}
for dep in dsk.dependents[k]:
block = dsk.layers[dep]
# Check if we're a read_parquet followed by a getitem
if not isinstance(block, Blockwise):
# getitem are Blockwise...
return dsk
if len(block.dsk) != 1:
# ... with a single item...
return dsk
if list(block.dsk.values())[0][0] != operator.getitem:
# ... where this value is __getitem__...
return dsk
if any(block.output == x[0] for x in keys if isinstance(x, tuple)):
# ... but bail on the optimization if the getitem is what's requested
# These keys are structured like [('getitem-<token>', 0), ...]
# so we check for the first item of the tuple.
# See https://github.com/dask/dask/issues/5893
return dsk
block_columns = block.indices[1][0]
if isinstance(block_columns, str):
block_columns = [block_columns]
columns |= set(block_columns)
update_blocks[dep] = block
old = layers[k]
if columns and columns < set(old.meta.columns):
columns = list(columns)
meta = old.meta[columns]
name = "read-parquet-" + tokenize(old.name, columns)
assert len(update_blocks)
for block_key, block in update_blocks.items():
# (('read-parquet-old', (.,)), ( ... )) ->
# (('read-parquet-new', (.,)), ( ... ))
new_indices = ((name, block.indices[0][1]), block.indices[1])
numblocks = {name: block.numblocks[old.name]}
new_block = Blockwise(
block.output,
block.output_indices,
block.dsk,
new_indices,
numblocks,
block.concatenate,
block.new_axes,
)
layers[block_key] = new_block
dependencies[block_key] = {name}
dependencies[name] = dependencies.pop(k)
else:
# Things like df[df.A == 'a'], where the argument to
# getitem is not a column name
name = old.name
meta = old.meta
columns = list(meta.columns)
new = ParquetSubgraph(
name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs
)
layers[name] = new
if name != old.name:
del layers[old.name]
new_hlg = HighLevelGraph(layers, dependencies)
return new_hlg
| bsd-3-clause | 241,666,005,083,893,500 | 33.268293 | 88 | 0.551601 | false | 3.913649 | false | false | false |
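
For context, the pattern `optimize_read_parquet_getitem` targets is a column selection layered directly on a `read_parquet` result; a sketch (the file path and column name are hypothetical):

```python
# Hypothetical read_parquet -> getitem pattern that the optimization above rewrites.
import dask.dataframe as dd

ddf = dd.read_parquet("data/events.parquet")  # produces a ParquetSubgraph layer
series = ddf["value"]                         # a Blockwise getitem layer on top of it
# At optimization time the getitem can be pushed into the parquet read so that
# only the 'value' column is actually loaded from the files.
total = series.sum().compute()
```
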
HonzaKral/warehouse | warehouse/packaging/models.py | 1 | 12439 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from citext import CIText
from pyramid.security import Allow
from pyramid.threadlocal import get_current_request
from sqlalchemy import (
CheckConstraint, Column, Enum, ForeignKey, ForeignKeyConstraint, Index,
Boolean, DateTime, Integer, Table, Text,
)
from sqlalchemy import func, orm, sql
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from warehouse import db
from warehouse.accounts.models import User
from warehouse.classifiers.models import Classifier
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils.attrs import make_repr
class Role(db.Model):
__tablename__ = "roles"
__table_args__ = (
Index("roles_pack_name_idx", "package_name"),
Index("roles_user_name_idx", "user_name"),
)
__repr__ = make_repr("role_name", "user_name", "package_name")
role_name = Column(Text)
user_name = Column(
CIText,
ForeignKey("accounts_user.username", onupdate="CASCADE"),
)
package_name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
)
user = orm.relationship(User, lazy=False)
project = orm.relationship("Project", lazy=False)
class ProjectFactory:
def __init__(self, request):
self.request = request
def __getitem__(self, project):
try:
return self.request.db.query(Project).filter(
Project.normalized_name == func.normalize_pep426_name(project)
).one()
except NoResultFound:
raise KeyError from None
class Project(SitemapMixin, db.ModelBase):
__tablename__ = "packages"
__table_args__ = (
CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="packages_valid_name",
),
)
__repr__ = make_repr("name")
name = Column(Text, primary_key=True, nullable=False)
normalized_name = orm.column_property(func.normalize_pep426_name(name))
stable_version = Column(Text)
autohide = Column(Boolean, server_default=sql.true())
comments = Column(Boolean, server_default=sql.true())
bugtrack_url = Column(Text)
hosting_mode = Column(Text, nullable=False, server_default="pypi-only")
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
has_docs = Column(Boolean)
upload_limit = Column(Integer, nullable=True)
releases = orm.relationship(
"Release",
backref="project",
cascade="all, delete-orphan",
lazy="dynamic",
)
def __getitem__(self, version):
try:
return self.releases.filter(Release.version == version).one()
except NoResultFound:
raise KeyError from None
def __acl__(self):
session = orm.object_session(self)
acls = []
# Get all of the users for this project.
query = session.query(Role).filter(Role.project == self)
query = query.options(orm.lazyload("project"))
query = query.options(orm.joinedload("user").lazyload("emails"))
for role in sorted(
query.all(),
key=lambda x: ["Owner", "Maintainer"].index(x.role_name)):
acls.append((Allow, role.user.id, ["upload"]))
return acls
@property
def documentation_url(self):
# TODO: Move this into the database and eliminate the use of the
# threadlocal here.
request = get_current_request()
# If the project doesn't have docs, then we'll just return a None here.
if not self.has_docs:
return
return request.route_url("legacy.docs", project=self.name)
class DependencyKind(enum.IntEnum):
requires = 1
provides = 2
obsoletes = 3
requires_dist = 4
provides_dist = 5
obsoletes_dist = 6
requires_external = 7
# TODO: Move project URLs into their own table, since they are not actually
# a "dependency".
project_url = 8
class Dependency(db.Model):
__tablename__ = "release_dependencies"
__table_args__ = (
Index("rel_dep_name_idx", "name"),
Index("rel_dep_name_version_idx", "name", "version"),
Index("rel_dep_name_version_kind_idx", "name", "version", "kind"),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
__repr__ = make_repr("name", "version", "kind", "specifier")
name = Column(Text)
version = Column(Text)
kind = Column(Integer)
specifier = Column(Text)
def _dependency_relation(kind):
return orm.relationship(
"Dependency",
primaryjoin=lambda: sql.and_(
Release.name == Dependency.name,
Release.version == Dependency.version,
Dependency.kind == kind.value,
),
viewonly=True,
)
class Release(db.ModelBase):
__tablename__ = "releases"
@declared_attr
def __table_args__(cls): # noqa
return (
Index("release_created_idx", cls.created.desc()),
Index("release_name_created_idx", cls.name, cls.created.desc()),
Index("release_name_idx", cls.name),
Index("release_pypi_hidden_idx", cls._pypi_hidden),
Index("release_version_idx", cls.version),
)
__repr__ = make_repr("name", "version")
name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
primary_key=True,
)
version = Column(Text, primary_key=True)
author = Column(Text)
author_email = Column(Text)
maintainer = Column(Text)
maintainer_email = Column(Text)
home_page = Column(Text)
license = Column(Text)
summary = Column(Text)
description = Column(Text)
keywords = Column(Text)
platform = Column(Text)
download_url = Column(Text)
_pypi_ordering = Column(Integer)
_pypi_hidden = Column(Boolean)
cheesecake_installability_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_documentation_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_code_kwalitee_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
requires_python = Column(Text)
description_from_readme = Column(Boolean)
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
_classifiers = orm.relationship(
Classifier,
backref="project_releases",
secondary=lambda: release_classifiers,
order_by=Classifier.classifier,
)
classifiers = association_proxy("_classifiers", "classifier")
files = orm.relationship(
"File",
backref="release",
cascade="all, delete-orphan",
lazy="dynamic",
order_by=lambda: File.filename,
)
dependencies = orm.relationship("Dependency")
_requires = _dependency_relation(DependencyKind.requires)
requires = association_proxy("_requires", "specifier")
_provides = _dependency_relation(DependencyKind.provides)
provides = association_proxy("_provides", "specifier")
_obsoletes = _dependency_relation(DependencyKind.obsoletes)
obsoletes = association_proxy("_obsoletes", "specifier")
_requires_dist = _dependency_relation(DependencyKind.requires_dist)
requires_dist = association_proxy("_requires_dist", "specifier")
_provides_dist = _dependency_relation(DependencyKind.provides_dist)
provides_dist = association_proxy("_provides_dist", "specifier")
_obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
_requires_external = _dependency_relation(DependencyKind.requires_external)
requires_external = association_proxy("_requires_external", "specifier")
_project_urls = _dependency_relation(DependencyKind.project_url)
project_urls = association_proxy("_project_urls", "specifier")
class File(db.Model):
__tablename__ = "release_files"
__table_args__ = (
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
Index("release_files_name_idx", "name"),
Index("release_files_name_version_idx", "name", "version"),
Index("release_files_packagetype_idx", "packagetype"),
Index("release_files_version_idx", "version"),
)
name = Column(Text)
version = Column(Text)
python_version = Column(Text)
packagetype = Column(
Enum(
"bdist_dmg", "bdist_dumb", "bdist_egg", "bdist_msi", "bdist_rpm",
"bdist_wheel", "bdist_wininst", "sdist",
),
)
comment_text = Column(Text)
filename = Column(Text, unique=True)
size = Column(Integer)
has_signature = Column(Boolean)
md5_digest = Column(Text, unique=True)
downloads = Column(Integer, server_default=sql.text("0"))
upload_time = Column(DateTime(timezone=False), server_default=func.now())
@hybrid_property
def path(self):
return "/".join([
self.python_version,
self.release.project.name[0],
self.release.project.name,
self.filename,
])
@path.expression
def path(self):
return func.concat_ws(
sql.text("'/'"),
self.python_version,
func.substring(self.name, sql.text("1"), sql.text("1")),
self.name,
self.filename,
)
@hybrid_property
def pgp_path(self):
return self.path + ".asc"
@pgp_path.expression
def pgp_path(self):
return func.concat(self.path, ".asc")
class Filename(db.ModelBase):
__tablename__ = "file_registry"
id = Column(Integer, primary_key=True, nullable=False)
filename = Column(Text, unique=True, nullable=False)
release_classifiers = Table(
"release_classifiers",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("trove_id", Integer(), ForeignKey("trove_classifiers.id")),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
Index("rel_class_name_idx", "name"),
Index("rel_class_name_version_idx", "name", "version"),
Index("rel_class_trove_id_idx", "trove_id"),
Index("rel_class_version_id_idx", "version"),
)
class JournalEntry(db.ModelBase):
__tablename__ = "journals"
@declared_attr
def __table_args__(cls): # noqa
return (
Index(
"journals_changelog",
"submitted_date", "name", "version", "action",
),
Index("journals_id_idx", "id"),
Index("journals_name_idx", "name"),
Index("journals_version_idx", "version"),
Index(
"journals_latest_releases",
"submitted_date", "name", "version",
postgresql_where=(
(cls.version != None) & (cls.action == "new release") # noqa
),
),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text)
version = Column(Text)
action = Column(Text)
submitted_date = Column(DateTime(timezone=False))
_submitted_by = Column(
"submitted_by",
CIText,
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
),
)
submitted_by = orm.relationship(User)
submitted_from = Column(Text)
| apache-2.0 | 3,921,414,559,749,365,000 | 29.045894 | 81 | 0.614438 | false | 3.941381 | false | false | false |
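
As an illustration of how the models above might be queried (a sketch only; the session setup, project name, and version are assumptions):

```python
# Hypothetical SQLAlchemy usage of the models above; `session` is assumed to be
# an already-configured Session bound to the warehouse database.
from warehouse.packaging.models import Release

release = (
    session.query(Release)
    .filter(Release.name == "example-project", Release.version == "1.0.0")
    .one()
)
print(release.project.name)        # backref from Project.releases
print(list(release.classifiers))   # association proxy over _classifiers
print(list(release.requires_dist)) # association proxy over the dependency relation
```
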
Wintermute0110/plugin.program.advanced.MAME.launcher | dev-graphics/test_generate_fanart.py | 1 | 5637 |
#!/usr/bin/python
#
#
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#
# Scales and centers img into a box of size (box_x_size, box_y_size).
# Scaling keeps original img aspect ratio.
# Returns an image of size (box_x_size, box_y_size)
#
def PIL_resize_proportional(img, layout, dic_key, CANVAS_COLOR = (0, 0, 0)):
box_x_size = layout[dic_key]['width']
box_y_size = layout[dic_key]['height']
# log_debug('PIL_resize_proportional() Initialising ...')
# log_debug('img X_size = {} | Y_size = {}'.format(img.size[0], img.size[1]))
# log_debug('box X_size = {} | Y_size = {}'.format(box_x_size, box_y_size))
# --- First try to fit X dimension ---
# log_debug('PIL_resize_proportional() Fitting X dimension')
wpercent = (box_x_size / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
r_x_size = box_x_size
r_y_size = hsize
x_offset = 0
y_offset = (box_y_size - r_y_size) / 2
# log_debug('resize X_size = {} | Y_size = {}'.format(r_x_size, r_y_size))
# log_debug('resize x_offset = {} | y_offset = {}'.format(x_offset, y_offset))
# --- Second try to fit Y dimension ---
if y_offset < 0:
# log_debug('Fitting Y dimension')
hpercent = (box_y_size / float(img.size[1]))
wsize = int((float(img.size[0]) * float(hpercent)))
r_x_size = wsize
r_y_size = box_y_size
x_offset = (box_x_size - r_x_size) / 2
y_offset = 0
# log_debug('resize X_size = {} | Y_size = {}'.format(r_x_size, r_y_size))
# log_debug('resize x_offset = {} | y_offset = {}'.format(x_offset, y_offset))
# >> Create a new image and paste original image centered.
canvas_img = Image.new('RGB', (box_x_size, box_y_size), CANVAS_COLOR)
# >> Resize and paste
img = img.resize((r_x_size, r_y_size), Image.ANTIALIAS)
canvas_img.paste(img, (x_offset, y_offset, x_offset + r_x_size, y_offset + r_y_size))
return canvas_img
def PIL_paste_image(img, img_title, layout, dic_key):
box = (
layout[dic_key]['x_pos'],
layout[dic_key]['y_pos'],
layout[dic_key]['x_pos'] + layout[dic_key]['x_size'],
layout[dic_key]['y_pos'] + layout[dic_key]['y_size']
)
img.paste(img_title, box)
return img
# --- Fanart layout ---
layout = {
'title' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 50, 'y_pos' : 50},
'snap' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 50, 'y_pos' : 550},
'flyer' : {'x_size' : 450, 'y_size' : 450, 'x_pos' : 1420, 'y_pos' : 50},
'cabinet' : {'x_size' : 300, 'y_size' : 425, 'x_pos' : 1050, 'y_pos' : 625},
'artpreview' : {'x_size' : 450, 'y_size' : 550, 'x_pos' : 550, 'y_pos' : 500},
'PCB' : {'x_size' : 300, 'y_size' : 300, 'x_pos' : 1500, 'y_pos' : 525},
'clearlogo' : {'x_size' : 450, 'y_size' : 200, 'x_pos' : 1400, 'y_pos' : 850},
'cpanel' : {'x_size' : 300, 'y_size' : 100, 'x_pos' : 1050, 'y_pos' : 500},
'marquee' : {'x_size' : 800, 'y_size' : 275, 'x_pos' : 550, 'y_pos' : 200},
'text' : { 'x_pos' : 550, 'y_pos' : 50, 'size' : 72},
}
# --- Create fanart canvas ---
img = Image.new('RGB', (1920, 1080), (0, 0, 0))
draw = ImageDraw.Draw(img)
font_mono = ImageFont.truetype('../fonts/Inconsolata.otf', layout['text']['size'])
# --- Title and Snap (colour rectangle for placement) ---
# img_title = Image.new('RGB', (TITLE_X_SIZE, TITLE_Y_SIZE), (25, 25, 25))
# img_snap = Image.new('RGB', (SNAP_X_SIZE, SNAP_Y_SIZE), (0, 200, 0))
# print('Title X_size = {} | img Y_size = {}'.format(img_title.size[0], img_title.size[1]))
# print(img_title.format, img_title.size, img_title.mode)
# --- Title and Snap (open PNG actual screenshot) ---
img_title = Image.open('dino_title.png')
img_snap = Image.open('dino_snap.png')
img_artpreview = Image.open('dino_artpreview.png')
img_cabinet = Image.open('dino_cabinet.png')
img_clearlogo = Image.open('dino_clearlogo.png')
img_cpanel = Image.open('dino_cpanel.png')
img_flyer = Image.open('dino_flyer.png')
img_marquee = Image.open('dino_marquee.png')
img_PCB = Image.open('dino_PCB.png')
# --- Resize keeping aspect ratio ---
img_title = PIL_resize_proportional(img_title, layout, 'title')
img_snap = PIL_resize_proportional(img_snap, layout, 'snap')
img_artpreview = PIL_resize_proportional(img_artpreview, layout, 'artpreview')
img_cabinet = PIL_resize_proportional(img_cabinet, layout, 'cabinet')
img_clearlogo = PIL_resize_proportional(img_clearlogo, layout, 'clearlogo')
img_cpanel = PIL_resize_proportional(img_cpanel, layout, 'cpanel')
img_flyer = PIL_resize_proportional(img_flyer, layout, 'flyer')
img_marquee = PIL_resize_proportional(img_marquee, layout, 'marquee')
img_PCB = PIL_resize_proportional(img_PCB, layout, 'PCB')
# --- Composite fanart ---
# NOTE The box dimensions must have the same size as the pasted image.
img = PIL_paste_image(img, img_title, layout, 'title')
img = PIL_paste_image(img, img_snap, layout, 'snap')
img = PIL_paste_image(img, img_artpreview, layout, 'artpreview')
img = PIL_paste_image(img, img_cabinet, layout, 'cabinet')
img = PIL_paste_image(img, img_clearlogo, layout, 'clearlogo')
img = PIL_paste_image(img, img_cpanel, layout, 'cpanel')
img = PIL_paste_image(img, img_flyer, layout, 'flyer')
img = PIL_paste_image(img, img_marquee, layout, 'marquee')
img = PIL_paste_image(img, img_PCB, layout, 'PCB')
# --- Print machine name ---
draw.text((layout['text']['x_pos'], layout['text']['y_pos']),
'dino', (255, 255, 255), font = font_mono)
# --- Save test fanart ---
img.save('fanart.png')
| gpl-2.0 | -9,040,719,822,525,605,000 | 43.738095 | 95 | 0.604932 | false | 2.658962 | false | false | false |
abreen/socrates.py | logisim/subcircuit.py | 1 | 8875 |
from logisim.util import num_rotations
from logisim.errors import NoValueGivenError
from logisim.debug import narrate, suppress_narration
from logisim.location import Location
from logisim.component import Component
from logisim.pins import InputPin, OutputPin
class Subcircuit(Component):
def __init__(self, circuit, defaults=None):
# Logisim global defaults
self.facing = 'east'
Component.__init__(self, defaults)
# reference to Circuit object
self.circuit = circuit
self.label = circuit.name
# TODO custom subcircuit appearance
self.appearance = None
def get_output_locations(self):
"""Use the underlying Circuit object's appearance data
(or the default logic) to produce a list of output pin locations.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
return [loc for loc, pin in locs.items() if type(pin) is OutputPin]
else:
raise NotImplementedError
def get_input_locations(self):
"""Use the underlying Circuit object's appearance data
(or the default logic) to produce a list of input pin locations.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
return [loc for loc, pin in locs.items() if type(pin) is InputPin]
else:
raise NotImplementedError
def get_pin_at(self, loc):
"""Given the location of a pin on this subcircuit, return
the pin at that location. This method produces the location of the
pin on this subcircuit's representation, not the location of the pin
on the underlying circuit's coordinate plane.
"""
if not self.appearance:
locs = _default_subcircuit_locations(self)
else:
raise NotImplementedError
for pin_loc, pin in locs.items():
if pin_loc == loc:
return pin
return None
def eval(self, at_loc):
if not self.appearance:
pins = _default_subcircuit_locations(self)
input_vals = {}
for in_pin_loc, tup in self.input_from.items():
component, out_pin_loc = tup
in_pin = pins[in_pin_loc]
try:
input_vals[in_pin] = component.eval(at_loc=out_pin_loc)
except NoValueGivenError:
# this subcircuit might still work, if this input pin is
# never used in the underlying circuit, so we don't
# do anything now
continue
output_vals = self.circuit.eval(input_vals)
return output_vals[pins[at_loc]]
else:
raise NotImplementedError
def _default_subcircuit_locations(subcircuit):
circuit = subcircuit.circuit
# for a subcircuit's default appearance, Logisim places each pin on
# an edge of the subcircuit rectangle by which direction they face in
# the actual circuit
pins_facing = {'north': [], 'east': [], 'south': [], 'west': []}
for pin in circuit.input_pins:
pins_facing[pin.facing].append(pin)
for pin in circuit.output_pins:
pins_facing[pin.facing].append(pin)
# sort the pins the way Logisim would sort them (for each facing
# direction, left to right or top to bottom)
for facing in pins_facing:
if facing in ['east', 'west']:
pins_facing[facing].sort(key=lambda pin: pin.loc.y)
else:
pins_facing[facing].sort(key=lambda pin: pin.loc.x)
# we construct a 2D list representing the subcircuit's appearance
top = pins_facing['south']
bottom = pins_facing['north']
left = pins_facing['east']
right = pins_facing['west']
# n rows, m columns
n = max(len(left), len(right))
m = max(len(top), len(bottom))
corner_spacing = (top or bottom) and (left or right)
if corner_spacing:
m += 2
n += 2
top = [None] + top + [None] if top else top
bottom = [None] + bottom + [None] if bottom else bottom
left = [None] + left + [None] if left else left
right = [None] + right + [None] if right else right
n = max(n, 4)
m = max(m, 4)
pin_layout = _make2d(n, m)
if top:
_overwrite_row(pin_layout, 0, top)
if bottom:
_overwrite_row(pin_layout, n - 1, bottom)
if left:
_overwrite_col(pin_layout, 0, left)
if right:
_overwrite_col(pin_layout, m - 1, right)
# we have the subcircuit's location, which is the location of what
# Logisim calls its "anchor"; by default, the anchor is placed over
# the first pin facing west (then south, east, and north, if there
# is no such pin)
# we will find the position of the anchor pin (the position being its
# row and column index into the 'pin_layout' 2-D list)
if len(pins_facing['west']) > 0:
# pins on the right
anchor_pos = (1 if corner_spacing else 0, m - 1)
elif len(pins_facing['south']) > 0:
# pins on the top
anchor_pos = (0, 1 if corner_spacing else 0)
elif len(pins_facing['east']) > 0:
# pins on the left
anchor_pos = (1 if corner_spacing else 0, 0)
elif len(pins_facing['north']) > 0:
# pins on the bottom
anchor_pos = (n - 1, 1 if corner_spacing else 0)
else:
# TODO subcircuit has no pins?
pass
# if this subcircuit is not facing east (the default), rotate the
# 2-D list and change the anchor position accordingly
rotations = num_rotations('east', subcircuit.facing)
if rotations != 0:
pin_layout, anchor_pos = _rotate(pin_layout, anchor_pos, rotations)
# redefine: n rows, m columns, if this rotate changed them
n, m = len(pin_layout), len(pin_layout[0])
x, y = subcircuit.loc.x, subcircuit.loc.y
# finds location of each pin given the subcircuit's anchor
# position by finding each position's difference in position
# in the list, and using that to find its absolute position
def pin_location(val, row, col):
y_offset = row - anchor_pos[0]
x_offset = col - anchor_pos[1]
return Location(x + (x_offset * 10), y + (y_offset * 10))
pin_locs = _map2d(pin_location, pin_layout)
return {pin_locs[r][c]: pin_layout[r][c]
for r in range(n) for c in range(m)
if type(pin_layout[r][c]) is not None}
def _map2d(f, list2d):
new_list2d = []
for r in range(len(list2d)):
new_row = []
for c in range(len(list2d[r])):
new_row.append(f(list2d[r][c], r, c))
new_list2d.append(new_row)
return new_list2d
def _make2d(rows, cols):
return [[None for _ in range(cols)] for _ in range(rows)]
def _overwrite_row(list_, index, row):
"""Given a reference to a 2-D list and a row index, replace the
row with the values in the new row. If the new row has fewer columns
than the existing one, the new row is centered and Nones are added
as padding.
"""
cols = len(list_[index])
if cols < len(row):
raise ValueError("row is too big ({}, expected {})".format(len(row),
cols))
elif cols == len(row):
new_row = row
else:
left = [None] * ((cols - len(row)) // 2)
right = [None] * (cols - len(row) - len(left))
new_row = left + row + right
for c in range(cols):
list_[index][c] = new_row[c]
def _overwrite_col(list_, index, col):
"""See overwrite_row(). This function does the same thing, but
column-wise.
"""
rows = len(list_)
if rows < len(col):
raise ValueError("column is too big ({}, expected {})".format(len(col),
rows))
elif rows == len(col):
new_col = col
else:
above = [None] * ((rows - len(col)) // 2)
below = [None] * (rows - len(col) - len(above))
new_col = above + col + below
for r in range(rows):
list_[r][index] = new_col[r]
def _rotate(pin_layout, anchor_pos, times):
for n in range(times):
anchor_pos = _rotate90_pos(anchor_pos, len(pin_layout))
pin_layout = _rotate90_2d(pin_layout)
return pin_layout, anchor_pos
def _rotate90_pos(anchor_pos, num_rows):
row_index, col_index = anchor_pos
return (col_index, num_rows - row_index - 1)
def _rotate90_2d(list_):
rows, cols = len(list_), len(list_[0])
rotated = [[None for _ in range(rows)] for _ in range(cols)]
for r in range(rows):
for c in range(cols):
new_r, new_c = _rotate90_pos((r, c), rows)
rotated[new_r][new_c] = list_[r][c]
return rotated
| gpl-2.0 | 3,178,043,507,803,182,600 | 30.810036 | 79 | 0.586479 | false | 3.727425 | false | false | false |
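
A small check of the rotation helpers defined above; the grid values are arbitrary and only illustrate the (row, col) -> (col, num_rows - row - 1) mapping.

```python
# Tiny illustration of the rotation helpers above; values are made up.
from logisim.subcircuit import _rotate90_2d, _rotate90_pos

grid = [[1, 2, 3],
        [4, 5, 6]]                     # 2 rows x 3 columns
assert _rotate90_2d(grid) == [[4, 1],
                              [5, 2],
                              [6, 3]]  # 3 rows x 2 columns after one rotation
assert _rotate90_pos((0, 2), 2) == (2, 1)
```
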
muchu1983/104_cameo | cameo/utility.py | 1 | 31383 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import re
import json
import time
import datetime
import dateparser
import pkg_resources
import shutil
import smtplib
import logging
import random
from email.mime.text import MIMEText
from scrapy import Selector
from geopy.geocoders import GoogleV3
from bennu.filesystemutility import FileSystemUtility
# Shared utility module
class Utility:
# Constructor
def __init__(self):
self.fsUtil = FileSystemUtility()
self.strListOfCountryByContinentJsonFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent.json")
self.parseListOfCountryWikiPage()
#email helper setting
self.DEFAULT_SMTP = "smtp.gmail.com:587"
self.DEFAULT_ACCOUNT = "cameoinfotech.tw@gmail.com"
self.DEFAULT_PASSWORD = "cameo70525198"
# Send email
def sendEmail(self, strSubject=None, strFrom=None, strTo=None, strMsg=None, lstStrTarget=None, strSmtp=None, strAccount=None, strPassword=None):
if not strSmtp:
strSmtp = self.DEFAULT_SMTP
if not strAccount:
strAccount = self.DEFAULT_ACCOUNT
if not strPassword:
strPassword = self.DEFAULT_PASSWORD
msg = MIMEText(strMsg)
msg["Subject"] = strSubject
msg["From"] = strFrom
msg["To"] = strTo
try:
server = smtplib.SMTP(strSmtp)
server.ehlo()
server.starttls()
server.login(strAccount, strPassword)
server.sendmail(strAccount, lstStrTarget, msg.as_string())
server.quit()
except Exception, e:
logging.error("[eMail Helper] Sending email failed! ErrorMessage: %s"%str(e))
# Save a file, overwriting any existing content
def overwriteSaveAs(self, strFilePath=None, unicodeData=None):
with open(strFilePath, "w+") as file:
file.write(unicodeData.encode("utf-8"))
# Read a json file and return its contents as a dict
def readObjectFromJsonFile(self, strJsonFilePath=None):
dicRet = None
with open(strJsonFilePath, "r") as jsonFile:
dicRet = json.load(jsonFile, encoding="utf-8")
return dicRet
# Write the contents of a dict to a json file
def writeObjectToJsonFile(self, dicData=None, strJsonFilePath=None):
with open(strJsonFilePath, "w+") as jsonFile:
jsonFile.write(json.dumps(dicData, ensure_ascii=False, indent=4, sort_keys=True).encode("utf-8"))
# Get the paths of immediate subdirectories
def getSubFolderPathList(self, strBasedir=None):
lstStrSubFolderPath = []
for base, dirs, files in os.walk(strBasedir):
if base == strBasedir:
for dir in dirs:
strFolderPath = base + "\\" + dir
lstStrSubFolderPath.append(strFolderPath)
return lstStrSubFolderPath
# Get paths of files in strBasedir whose names end with strSuffixes
def getFilePathListWithSuffixes(self, strBasedir=None, strSuffixes=None):
lstStrFilePathWithSuffixes = []
for base, dirs, files in os.walk(strBasedir):
if base == strBasedir:#just check base dir
for strFilename in files:
if strFilename.endswith(strSuffixes):#find target files
strFilePath = base + "\\" + strFilename
lstStrFilePathWithSuffixes.append(strFilePath)
return lstStrFilePathWithSuffixes
# Recursively get paths of files under strBasedir whose names end with strSuffixes
def recursiveGetFilePathListWithSuffixes(self, strBasedir=None, strSuffixes=None):
lstStrFilePathWithSuffixes = []
for base, dirs, files in os.walk(strBasedir):
for strFilename in files:
if strFilename.endswith(strSuffixes):#find target files
strFilePath = base + "\\" + strFilename
lstStrFilePathWithSuffixes.append(strFilePath)
return lstStrFilePathWithSuffixes
# Convert an abbreviated number string to a plain number (ex: 26.3k -> 26300)
def translateNumTextToPureNum(self, strNumText=None):
strNumText = strNumText.lower()
fPureNum = 0.0
strFloatPartText = re.match("^([0-9\.]*)k?m?$", strNumText)
if strFloatPartText != None:
strFloatPartText = strFloatPartText.group(1)
if strNumText.endswith("k"):
fPureNum = float(strFloatPartText) * 1000
elif strNumText.endswith("m"):
fPureNum = float(strFloatPartText) * 1000000
else:
fPureNum = float(strFloatPartText) * 1
return int(fPureNum)
# Convert a time-left string to a plain number
def translateTimeleftTextToPureNum(self, strTimeleftText=None, strVer=None):
dicVer = {"INDIEGOGO": self.translateTimeleftTextToPureNum_INDIEGOGO,
"WEBACKERS": self.translateTimeleftTextToPureNum_WEBACKERS}
return dicVer[strVer](strTimeleftText=strTimeleftText)
# Convert a time-left string to a plain number (ex: 100 days left -> 100)
def translateTimeleftTextToPureNum_INDIEGOGO(self, strTimeleftText=None):
intDays = 0
if strTimeleftText == None:
return intDays
strTimeleftText = strTimeleftText.lower().strip()
if "hours left" in strTimeleftText:
strHoursText = re.match("^([0-9]*) hours left$", strTimeleftText)
if strHoursText != None:
strHoursText = strHoursText.group(1)
intDays = (int(strHoursText)+24)/24 # fewer than 24h still counts as 1 day
elif "days left" in strTimeleftText:
strDaysText = re.match("^([0-9]*) days left$", strTimeleftText)
if strDaysText != None:
strDaysText = strDaysText.group(1)
intDays = int(strDaysText)
else:
intDays = 0
return intDays
# Convert a remaining-time string to a number of days (ex. 2個月13天後結束 -> 73 days)
def translateTimeleftTextToPureNum_WEBACKERS(self, strTimeleftText=None):
intDays = 0
if strTimeleftText is not None:
if strTimeleftText in (u"已完成", u"已結束"):
return 0
strMonth = re.match(u"^([0-9]*)個月[0-9]*天後結束$", strTimeleftText)
strDay = re.match(u"^[0-9]*?個?月?([0-9]*)天後結束$", strTimeleftText)
if strMonth is not None:
strMonth = strMonth.group(1)
intDays = intDays + (int(strMonth)*30)
if strDay is not None:
strDay = strDay.group(1)
intDays = intDays + int(strDay)
return intDays
# Get the creation date of a file
def getCtimeOfFile(self, strFilePath=None):
fCTimeStamp = os.path.getctime(strFilePath)
dtCTime = datetime.datetime.fromtimestamp(fCTimeStamp)
strCTime = dtCTime.strftime("%Y-%m-%d")
return strCTime
# Use geopy to normalize raw location information
def geopyGeocode(self, strOriginLocation=""):
lstStrApiKey = [
u"AIzaSyB71s7yWXJajGDgfZXHGBXYnOww6eLx9vU",
u"AIzaSyDFYBYcwMkicRxE1hVUIHVNk5K2UFvV9Yk",
u"AIzaSyCCU72G1ID4zIfWN8I8zeoRtkLWFSG_jC8",
u"AIzaSyDc71hTtE2XTTiVnad-Jz3rXe338VcqWBY"
]
geolocator = GoogleV3(api_key=random.choice(lstStrApiKey))
time.sleep(1) # avoid sending geopy queries too quickly
location = None
try:
location = geolocator.geocode(strOriginLocation, exactly_one=True)
except:
logging.error("[geopy error] find geocode faild. origin string: %s"%strOriginLocation)
(strAddress, fLatitude, fLongitude) = (None, 0, 0)
if location is not None:
strAddress = location.address
fLatitude = location.latitude
fLongitude = location.longitude
return (strAddress, fLatitude, fLongitude)
# Parse list_of_country_by_continent_on_wikipedia.html
def parseListOfCountryWikiPage(self):
strLOCBCWikiPageFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent_on_wikipedia.html")
strParsedResultJsonFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="list_of_country_by_continent.json")
dicCountryNameCodeMapping = {}
strISO3166WikiPageFilePath = self.fsUtil.getPackageResourcePath(strPackageName="cameo_res", strResourceName="iso_3166_1_on_wikipedia.html")
with open(strISO3166WikiPageFilePath, "r") as pageISO3166File: #parse iso_3166_1_on_wikipedia.html
strPageSource = pageISO3166File.read()
root = Selector(text=strPageSource)
elesCountryTr = root.css("table.wikitable:nth-of-type(1) tbody tr")
for eleCountryTr in elesCountryTr:
strCountryNameText = eleCountryTr.css("td:nth-of-type(1) a::text").extract_first().lower()
strCountryCodeText = eleCountryTr.css("td:nth-of-type(2) a span::text").extract_first()
dicCountryNameCodeMapping[strCountryNameText] = strCountryCodeText
with open(strLOCBCWikiPageFilePath, "r") as pageLOCBCFile: #parse list_of_country_by_continent_on_wikipedia.html
strPageSource = pageLOCBCFile.read()
root = Selector(text=strPageSource)
elesContinentTable = root.css("table.wikitable")
dicParsedResult = {}
dicContinentName = {0:"AF", 1:"AS", 2:"EU", 3:"NA",
4:"SA", 5:"OC", 6:"AN"}
for intCurrentTableIndex, eleContinentTable in enumerate(elesContinentTable):
lstDicCountryData = []
lstStrCountryName = eleContinentTable.css("tr td:nth-of-type(2) i > a::text, tr td:nth-of-type(2) b > a::text").extract()
for strCountryName in lstStrCountryName:
dicCountryData = {}
#country name
dicCountryData["name"] = strCountryName.lower()
#country iso-3316-1 code
dicCountryData["code"] = None
for strCountryNameKey in dicCountryNameCodeMapping:
if re.search(dicCountryData["name"], strCountryNameKey):
dicCountryData["code"] = dicCountryNameCodeMapping[strCountryNameKey]
if dicCountryData.get("code", None) is not None:
lstDicCountryData.append(dicCountryData)
dicParsedResult[dicContinentName[intCurrentTableIndex]] = lstDicCountryData
# Custom data entries
dicParsedResult["NA"].append({"name":"united states", "code":"US"})
dicParsedResult["NA"].append({"name":"usa", "code":"US"})
dicParsedResult["EU"].append({"name":"uk", "code":"GB"})
self.writeObjectToJsonFile(dicData=dicParsedResult, strJsonFilePath=strParsedResultJsonFilePath)
# Get the ISO-3166-1 country code
def getCountryCode(self, strCountryName=None):
dicListOfCountryByContinent = self.readObjectFromJsonFile(strJsonFilePath=self.strListOfCountryByContinentJsonFilePath)
strCountryCodeMatched = None
if strCountryName: # is not None
for strContinentName in dicListOfCountryByContinent:
lstDicCountryData = dicListOfCountryByContinent[strContinentName]
for dicCountryData in lstDicCountryData:
if unicode(strCountryName.lower().strip()) == dicCountryData["name"]:
strCountryCodeMatched = dicCountryData["code"]
return strCountryCodeMatched
# Look up continent data from the wiki page (list_of_country_by_continent.json)
def getContinentByCountryNameWikiVersion(self, strCountryName=None):
dicListOfCountryByContinent = self.readObjectFromJsonFile(strJsonFilePath=self.strListOfCountryByContinentJsonFilePath)
strContinentNameMatched = None
if strCountryName:# is not None
for strContinentName in dicListOfCountryByContinent:
lstDicCountryData = dicListOfCountryByContinent[strContinentName]
for dicCountryData in lstDicCountryData:
if unicode(strCountryName.lower().strip()) == dicCountryData["name"]:
strContinentNameMatched = strContinentName
return strContinentNameMatched
# Convert dates with the dateparser module
def parseStrDateByDateparser(self, strOriginDate=None, strBaseDate=datetime.datetime.now().strftime("%Y-%m-%d")):
strParsedDateBaseOnGivenBaseDate = None
dtBaseDate = datetime.datetime.strptime(strBaseDate, "%Y-%m-%d")
dToday = datetime.date.today()
dtToday = datetime.datetime.combine(dToday, datetime.datetime.min.time())
timedeltaNowToBase = dtToday - dtBaseDate
if strOriginDate: #is not None
dtParsedDateBaseOnNow = dateparser.parse(strOriginDate)
if dtParsedDateBaseOnNow:#is not None
strParsedDateBaseOnGivenBaseDate = (dtParsedDateBaseOnNow - timedeltaNowToBase).strftime("%Y-%m-%d")
return strParsedDateBaseOnGivenBaseDate
# Append a line to the end of the txt file if it is not already there
def appendLineToTxtIfNotExists(self, strTxtFilePath=None, strLine=None):
lstStrLineInTxt = []
strLine = strLine.strip() + u"\n"
if os.path.exists(strTxtFilePath):
with open(strTxtFilePath, "r") as txtFile:
lstStrLineInTxt = txtFile.readlines()
if strLine not in lstStrLineInTxt: # check for duplicates
with open(strTxtFilePath, "a") as txtFile:
#append line to .txt
txtFile.write(strLine)
# Remove newlines from each string in the list, join them, then strip the result
def stripTextArray(self, lstStrText=None):
strTextLine = u""
for strText in lstStrText:
if strText is not None:
strText = re.sub("\s", " ", strText)
strTextLine = strTextLine + u" " + strText.strip()
return strTextLine.strip()
# Test renaming of crunchbase organization html files
def crunchbaseOrganizationHtmlFileRename(self, strSourceFolder=None, strTargetFolder=None):
lstStrSourceHtmlFilePath = self.getFilePathListWithSuffixes(strBasedir=strSourceFolder, strSuffixes="crunchbase.html")
lstStrSourceHtmlFilePath = lstStrSourceHtmlFilePath + self.getFilePathListWithSuffixes(strBasedir=strSourceFolder, strSuffixes="crunchbase.htm")
for strSourceHtmlFilePath in lstStrSourceHtmlFilePath:
strCrunchbaseId = re.search("^.*\\\\(.*)crunchbase.html?$", strSourceHtmlFilePath).group(1)
strCrunchbaseId = re.sub("[^a-zA-Z0-9]+", "-", strCrunchbaseId.lower()).strip("-")
strTargetHtmlFilePath = strTargetFolder + u"\\%s_organization.html"%strCrunchbaseId
shutil.copy(strSourceHtmlFilePath, strTargetHtmlFilePath)
# Look up continent data using a built-in country mapping table
def getContinentByCountryName(self, strCountryName=None):
countries = [
{"code": "AD", "continent": "Europe", "name": "Andorra"},
{"code": "AF", "continent": "Asia", "name": "Afghanistan"},
{"code": "AG", "continent": "North America", "name": "Antigua and Barbuda"},
{"code": "AL", "continent": "Europe", "name": "Albania"},
{"code": "AM", "continent": "Asia", "name": "Armenia"},
{"code": "AO", "continent": "Africa", "name": "Angola"},
{"code": "AR", "continent": "South America", "name": "Argentina"},
{"code": "AT", "continent": "Europe", "name": "Austria"},
{"code": "AU", "continent": "Oceania", "name": "Australia"},
{"code": "AZ", "continent": "Asia", "name": "Azerbaijan"},
{"code": "BB", "continent": "North America", "name": "Barbados"},
{"code": "BD", "continent": "Asia", "name": "Bangladesh"},
{"code": "BE", "continent": "Europe", "name": "Belgium"},
{"code": "BF", "continent": "Africa", "name": "Burkina Faso"},
{"code": "BG", "continent": "Europe", "name": "Bulgaria"},
{"code": "BH", "continent": "Asia", "name": "Bahrain"},
{"code": "BI", "continent": "Africa", "name": "Burundi"},
{"code": "BJ", "continent": "Africa", "name": "Benin"},
{"code": "BN", "continent": "Asia", "name": "Brunei Darussalam"},
{"code": "BO", "continent": "South America", "name": "Bolivia"},
{"code": "BR", "continent": "South America", "name": "Brazil"},
{"code": "BS", "continent": "North America", "name": "Bahamas"},
{"code": "BT", "continent": "Asia", "name": "Bhutan"},
{"code": "BW", "continent": "Africa", "name": "Botswana"},
{"code": "BY", "continent": "Europe", "name": "Belarus"},
{"code": "BZ", "continent": "North America", "name": "Belize"},
{"code": "CA", "continent": "North America", "name": "Canada"},
{"code": "CD", "continent": "Africa", "name": "Democratic Republic of the Congo"},
{"code": "CG", "continent": "Africa", "name": "Republic of the Congo"},
{"code": "CI", "continent": "Africa", "name": u"Côte d'Ivoire"},
{"code": "CI", "continent": "Africa", "name": u"Cote d'Ivoire"},
{"code": "CL", "continent": "South America", "name": "Chile"},
{"code": "CM", "continent": "Africa", "name": "Cameroon"},
{"code": "CN", "continent": "Asia", "name": u"People's Republic of China"},
{"code": "CN", "continent": "Asia", "name": u"China"},
{"code": "CO", "continent": "South America", "name": "Colombia"},
{"code": "CR", "continent": "North America", "name": "Costa Rica"},
{"code": "CU", "continent": "North America", "name": "Cuba"},
{"code": "CV", "continent": "Africa", "name": "Cape Verde"},
{"code": "CY", "continent": "Asia", "name": "Cyprus"},
{"code": "CZ", "continent": "Europe", "name": "Czech Republic"},
{"code": "DE", "continent": "Europe", "name": "Germany"},
{"code": "DJ", "continent": "Africa", "name": "Djibouti"},
{"code": "DK", "continent": "Europe", "name": "Denmark"},
{"code": "DM", "continent": "North America", "name": "Dominica"},
{"code": "DO", "continent": "North America", "name": "Dominican Republic"},
{"code": "EC", "continent": "South America", "name": "Ecuador"},
{"code": "EE", "continent": "Europe", "name": "Estonia"},
{"code": "EG", "continent": "Africa", "name": "Egypt"},
{"code": "ER", "continent": "Africa", "name": "Eritrea"},
{"code": "ET", "continent": "Africa", "name": "Ethiopia"},
{"code": "FI", "continent": "Europe", "name": "Finland"},
{"code": "FJ", "continent": "Oceania", "name": "Fiji"},
{"code": "FR", "continent": "Europe", "name": "France"},
{"code": "GA", "continent": "Africa", "name": "Gabon"},
{"code": "GE", "continent": "Asia", "name": "Georgia"},
{"code": "GH", "continent": "Africa", "name": "Ghana"},
{"code": "GM", "continent": "Africa", "name": "The Gambia"},
{"code": "GN", "continent": "Africa", "name": "Guinea"},
{"code": "GR", "continent": "Europe", "name": "Greece"},
{"code": "GT", "continent": "North America", "name": "Guatemala"},
{"code": "GT", "continent": "North America", "name": "Haiti"},
{"code": "GW", "continent": "Africa", "name": "Guinea-Bissau"},
{"code": "GY", "continent": "South America", "name": "Guyana"},
{"code": "HN", "continent": "North America", "name": "Honduras"},
{"code": "HU", "continent": "Europe", "name": "Hungary"},
{"code": "ID", "continent": "Asia", "name": "Indonesia"},
{"code": "IE", "continent": "Europe", "name": "Republic of Ireland"},
{"code": "IL", "continent": "Asia", "name": "Israel"},
{"code": "IN", "continent": "Asia", "name": "India"},
{"code": "IQ", "continent": "Asia", "name": "Iraq"},
{"code": "IR", "continent": "Asia", "name": "Iran"},
{"code": "IS", "continent": "Europe", "name": "Iceland"},
{"code": "IT", "continent": "Europe", "name": "Italy"},
{"code": "JM", "continent": "North America", "name": "Jamaica"},
{"code": "JO", "continent": "Asia", "name": "Jordan"},
{"code": "JP", "continent": "Asia", "name": "Japan"},
{"code": "KE", "continent": "Africa", "name": "Kenya"},
{"code": "KG", "continent": "Asia", "name": "Kyrgyzstan"},
{"code": "KI", "continent": "Oceania", "name": "Kiribati"},
{"code": "KP", "continent": "Asia", "name": "North Korea"},
{"code": "KR", "continent": "Asia", "name": "South Korea"},
{"code": "KW", "continent": "Asia", "name": "Kuwait"},
{"code": "LB", "continent": "Asia", "name": "Lebanon"},
{"code": "LI", "continent": "Europe", "name": "Liechtenstein"},
{"code": "LR", "continent": "Africa", "name": "Liberia"},
{"code": "LS", "continent": "Africa", "name": "Lesotho"},
{"code": "LT", "continent": "Europe", "name": "Lithuania"},
{"code": "LU", "continent": "Europe", "name": "Luxembourg"},
{"code": "LV", "continent": "Europe", "name": "Latvia"},
{"code": "LY", "continent": "Africa", "name": "Libya"},
{"code": "MG", "continent": "Africa", "name": "Madagascar"},
{"code": "MH", "continent": "Oceania", "name": "Marshall Islands"},
{"code": "MK", "continent": "Europe", "name": "Macedonia"},
{"code": "ML", "continent": "Africa", "name": "Mali"},
{"code": "MM", "continent": "Asia", "name": "Myanmar"},
{"code": "MN", "continent": "Asia", "name": "Mongolia"},
{"code": "MR", "continent": "Africa", "name": "Mauritania"},
{"code": "MT", "continent": "Europe", "name": "Malta"},
{"code": "MU", "continent": "Africa", "name": "Mauritius"},
{"code": "MV", "continent": "Asia", "name": "Maldives"},
{"code": "MW", "continent": "Africa", "name": "Malawi"},
{"code": "MX", "continent": "North America", "name": "Mexico"},
{"code": "MY", "continent": "Asia", "name": "Malaysia"},
{"code": "MZ", "continent": "Africa", "name": "Mozambique"},
{"code": "NA", "continent": "Africa", "name": "Namibia"},
{"code": "NE", "continent": "Africa", "name": "Niger"},
{"code": "NG", "continent": "Africa", "name": "Nigeria"},
{"code": "NI", "continent": "North America", "name": "Nicaragua"},
{"code": "NL", "continent": "Europe", "name": "Kingdom of the Netherlands"},
{"code": "NL", "continent": "Europe", "name": "Netherlands"},
{"code": "NO", "continent": "Europe", "name": "Norway"},
{"code": "NP", "continent": "Asia", "name": "Nepal"},
{"code": "NR", "continent": "Oceania", "name": "Nauru"},
{"code": "NZ", "continent": "Oceania", "name": "New Zealand"},
{"code": "OM", "continent": "Asia", "name": "Oman"},
{"code": "PA", "continent": "North America", "name": "Panama"},
{"code": "PE", "continent": "South America", "name": "Peru"},
{"code": "PG", "continent": "Oceania", "name": "Papua New Guinea"},
{"code": "PH", "continent": "Asia", "name": "Philippines"},
{"code": "PK", "continent": "Asia", "name": "Pakistan"},
{"code": "PL", "continent": "Europe", "name": "Poland"},
{"code": "PT", "continent": "Europe", "name": "Portugal"},
{"code": "PW", "continent": "Oceania", "name": "Palau"},
{"code": "PY", "continent": "South America", "name": "Paraguay"},
{"code": "QA", "continent": "Asia", "name": "Qatar"},
{"code": "RO", "continent": "Europe", "name": "Romania"},
{"code": "RU", "continent": "Europe", "name": "Russia"},
{"code": "RU", "continent": "Europe", "name": "Russian Federation"},
{"code": "RW", "continent": "Africa", "name": "Rwanda"},
{"code": "SA", "continent": "Asia", "name": "Saudi Arabia"},
{"code": "SB", "continent": "Oceania", "name": "Solomon Islands"},
{"code": "SC", "continent": "Africa", "name": "Seychelles"},
{"code": "SD", "continent": "Africa", "name": "Sudan"},
{"code": "SE", "continent": "Europe", "name": "Sweden"},
{"code": "SG", "continent": "Asia", "name": "Singapore"},
{"code": "SI", "continent": "Europe", "name": "Slovenia"},
{"code": "SK", "continent": "Europe", "name": "Slovakia"},
{"code": "SL", "continent": "Africa", "name": "Sierra Leone"},
{"code": "SM", "continent": "Europe", "name": "San Marino"},
{"code": "SN", "continent": "Africa", "name": "Senegal"},
{"code": "SO", "continent": "Africa", "name": "Somalia"},
{"code": "SR", "continent": "South America", "name": "Suriname"},
{"code": "ST", "continent": "Africa", "name": u"República Democrática de São Tomé e Príncipe"},
{"code": "SY", "continent": "Asia", "name": "Syria"},
{"code": "TG", "continent": "Africa", "name": "Togo"},
{"code": "TH", "continent": "Asia", "name": "Thailand"},
{"code": "TJ", "continent": "Asia", "name": "Tajikistan"},
{"code": "TM", "continent": "Asia", "name": "Turkmenistan"},
{"code": "TN", "continent": "Africa", "name": "Tunisia"},
{"code": "TO", "continent": "Oceania", "name": "Tonga"},
{"code": "TR", "continent": "Asia", "name": "Turkey"},
{"code": "TT", "continent": "North America", "name": "Trinidad and Tobago"},
{"code": "TV", "continent": "Oceania", "name": "Tuvalu"},
{"code": "TZ", "continent": "Africa", "name": "Tanzania"},
{"code": "UA", "continent": "Europe", "name": "Ukraine"},
{"code": "UG", "continent": "Africa", "name": "Uganda"},
{"code": "US", "continent": "North America", "name": "United States"},
{"code": "UY", "continent": "South America", "name": "Uruguay"},
{"code": "UZ", "continent": "Asia", "name": "Uzbekistan"},
{"code": "VA", "continent": "Europe", "name": "Vatican City"},
{"code": "VE", "continent": "South America", "name": "Venezuela"},
{"code": "VN", "continent": "Asia", "name": "Vietnam"},
{"code": "VU", "continent": "Oceania", "name": "Vanuatu"},
{"code": "YE", "continent": "Asia", "name": "Yemen"},
{"code": "ZM", "continent": "Africa", "name": "Zambia"},
{"code": "ZW", "continent": "Africa", "name": "Zimbabwe"},
{"code": "DZ", "continent": "Africa", "name": "Algeria"},
{"code": "BA", "continent": "Europe", "name": "Bosnia and Herzegovina"},
{"code": "KH", "continent": "Asia", "name": "Cambodia"},
{"code": "CF", "continent": "Africa", "name": "Central African Republic"},
{"code": "TD", "continent": "Africa", "name": "Chad"},
{"code": "KM", "continent": "Africa", "name": "Comoros"},
{"code": "HR", "continent": "Europe", "name": "Croatia"},
{"code": "TL", "continent": "Asia", "name": "East Timor"},
{"code": "SV", "continent": "North America", "name": "El Salvador"},
{"code": "GQ", "continent": "Africa", "name": "Equatorial Guinea"},
{"code": "GD", "continent": "North America", "name": "Grenada"},
{"code": "KZ", "continent": "Asia", "name": "Kazakhstan"},
{"code": "LA", "continent": "Asia", "name": "Laos"},
{"code": "FM", "continent": "Oceania", "name": "Federated States of Micronesia"},
{"code": "MD", "continent": "Europe", "name": "Moldova"},
{"code": "MC", "continent": "Europe", "name": "Monaco"},
{"code": "ME", "continent": "Europe", "name": "Montenegro"},
{"code": "MA", "continent": "Africa", "name": "Morocco"},
{"code": "KN", "continent": "North America", "name": "Saint Kitts and Nevis"},
{"code": "LC", "continent": "North America", "name": "Saint Lucia"},
{"code": "VC", "continent": "North America", "name": "Saint Vincent and the Grenadines"},
{"code": "WS", "continent": "Oceania", "name": "Samoa"},
{"code": "RS", "continent": "Europe", "name": "Serbia"},
{"code": "ZA", "continent": "Africa", "name": "South Africa"},
{"code": "ES", "continent": "Europe", "name": "Spain"},
{"code": "LK", "continent": "Asia", "name": "Sri Lanka"},
{"code": "SZ", "continent": "Africa", "name": "Swaziland"},
{"code": "CH", "continent": "Europe", "name": "Switzerland"},
{"code": "AE", "continent": "Asia", "name": "United Arab Emirates"},
{"code": "GB", "continent": "Europe", "name": "United Kingdom"},
{"code": "TW", "continent": "Asia", "name": "Taiwan"},
{"code": "AW", "continent": "North America", "name": "Aruba"},
{"code": "FO", "continent": "Europe", "name": "Faroe Islands"},
{"code": "GI", "continent": "Europe", "name": "Gibraltar"},
{"code": "GU", "continent": "Oceania", "name": "Guam"},
{"code": "HK", "continent": "Asia", "name": "Hong Kong"},
{"code": "HT", "continent": "North America", "name": "Haiti"},
{"code": "IM", "continent": "Europe", "name": "Isle of Man"},
{"code": "JE", "continent": "Europe", "name": "Jersey"},
{"code": "KY", "continent": "North America", "name": "Cayman Islands"},
{"code": "MP", "continent": "Oceania", "name": "Northern Mariana Islands"},
{"code": "NC", "continent": "Oceania", "name": "New Caledonia"},
{"code": "PF", "continent": "Oceania", "name": "French Polynesia"},
{"code": "PR", "continent": "South America", "name": "Puerto Rico"},
{"code": "VI", "continent": "North America", "name": "US Virgin Islands"},
{"code": "YT", "continent": "Africa", "name": "Mayotte"},
]
strContinent = None
        if strCountryName is not None:
strCountryName = unicode(strCountryName.lower().strip())
for country in countries:
if strCountryName == unicode(country["name"].lower().strip()):
strContinent = country["continent"]
return strContinent
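    # Hedged usage sketch: assuming `crawler` is an instance of this helper
    # class, the lookup is case-insensitive on the English country name and
    # returns None when nothing matches, e.g.
    #   crawler.getContinentByCountryName("taiwan")   -> "Asia"
    #   crawler.getContinentByCountryName("Atlantis") -> None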
|
bsd-3-clause
| -676,311,704,294,043,800
| 57.577947
| 170
| 0.555873
| false
| 3.259389
| false
| false
| false
|
tcyb/nextgen4b
|
nextgen4b/process/sites.py
|
1
|
2110
|
from Bio import SeqIO
import yaml
import sys
import os
def replace_deletions(word, seq, idxs, del_letter='d'):
"""
Replace any '-' in word with del_letter if the nucleotides next to it in
seq are not '-'.
"""
new_word = [c for c in word]
for i, letter in enumerate(word):
# assume we're not looking at the start or end of sequence
idx = idxs[i]
        assert 0 < idx < len(seq) - 1
if letter == '-':
if seq[idx-1] != '-' and seq[idx+1] != '-':
new_word[i] = del_letter
return ''.join(new_word)
def get_positions(f_name, sites, keep_dashes=True, mark_deletions=False):
"""
Reads in a fasta file of sequences (usually produced by nextgen_main.py)
at location f_name, and pulls out the bases at the (0-start) indices in
sites.
Input:
- f_name: str
- sites: list (ints)
- keep_dashes: bool
- mark_deletions: bool
Output:
- words: list
Options:
keep_dashes: if this is false, get_positions will discard any words with a
dash in them (generally denoting a deletion)
mark_deletions: if this is true, deletions (dashes flanked by non-dashes on
both sides) will be marked (with a 'd', but this should be
customizable?)
"""
words = []
with open(f_name) as f:
seq_iter = SeqIO.parse(f, 'fasta')
for s in seq_iter:
selected_letters = ''.join([str(s.seq[i]) for i in sites])
if '-' not in selected_letters:
words.append(selected_letters)
elif keep_dashes:
if mark_deletions:
words.append(replace_deletions(selected_letters, s, sites))
else:
words.append(selected_letters)
return words
if __name__ == '__main__':
in_name = sys.argv[1]
out_name = sys.argv[2]
sites = [int(x) for x in sys.argv[3:]]
words = get_positions(in_name, sites, keep_dashes=True)
with open(out_name, 'w') as of:
for word in words:
of.write('%s\n' % word)
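    # Hedged illustration (not part of the original CLI): a '-' that is flanked
    # by real bases on both sides is rewritten as 'd' by replace_deletions.
    assert replace_deletions('A-C', 'AA-CC', [1, 2, 3]) == 'AdC'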
|
mit
| 9,148,165,284,312,616,000
| 29.157143
| 79
| 0.569668
| false
| 3.564189
| false
| false
| false
|
eikonomega/file-comparison-panda
|
file_comparison_panda/file_comparison_panda.py
|
1
|
5939
|
"""
The file_comparison module exists to easily compare the contents of two
files. The functionality of this module is currently limited to CSV files.
"""
import csv
from file_comparison_exceptions import (
UnsupportedFileType, FileDoesNotExist, PermissionDeniedOnFile)
SUPPORTED_FILE_TYPES = ['csv']
class FileComparisonPanda(object):
"""
Compares the data in two files and provides matching and unique
records.
"""
def __init__(
self, file_path_1, file_path_2):
"""
Verify that constructor arguments are actually files and
of supported types. Perform routine object
initialization tasks.
Args:
file_path_1 (str): Filepath of first file for comparison.
file_path_2 (str): Filepath of second file for comparison.
Raises:
IOError: When one of the files identified by the parameters
doesn't exist or is inaccessible.
NotImplementedError: When one of the files being compared has
a non-supported file extension.
"""
self._unique_records = dict()
self._matching_records = list()
self.file_one = file_path_1
self.file_two = file_path_2
@property
def file_one(self):
return self._file_one
@file_one.setter
def file_one(self, file_path):
FileComparisonPanda._verify_acceptable_file_extensions(
[file_path], SUPPORTED_FILE_TYPES)
FileComparisonPanda._verify_file_accessibility(file_path)
self._file_one = file_path
self._reset_file_comparison_data()
@file_one.deleter
def file_one(self):
self._file_one = None
@property
def file_two(self):
return self._file_two
@file_two.setter
def file_two(self, file_path):
FileComparisonPanda._verify_acceptable_file_extensions(
[file_path], SUPPORTED_FILE_TYPES)
FileComparisonPanda._verify_file_accessibility(file_path)
self._file_two = file_path
self._reset_file_comparison_data()
@file_two.deleter
def file_two(self):
self._file_two = None
@staticmethod
def _verify_acceptable_file_extensions(
list_of_filenames, list_of_extensions):
"""
        Verify that every file in list_of_filenames has one of the extensions
        in list_of_extensions; raise UnsupportedFileType otherwise.
        The caller is responsible for providing valid filenames.
"""
for filename in list_of_filenames:
filename_parts = filename.partition('.')
if filename_parts[2] not in list_of_extensions:
raise UnsupportedFileType(
"One of the file paths provided to FileComparisonPanda() "
"references an unsupported file type. The following "
"file types are supported: {}".format(SUPPORTED_FILE_TYPES))
@staticmethod
def _verify_file_accessibility(file_path):
try:
file_being_verified = open(file_path, 'rU')
except IOError as error:
if error.errno == 2:
raise FileDoesNotExist(
"One of the file paths provided to FileComparisonPanda() "
"is invalid. Verify that '{}' exists".format(
error.filename))
elif error.errno == 13:
raise PermissionDeniedOnFile(
"One of the file paths provided to FileComparisonPanda() "
"is not accessible. Verify that '{}' is readable "
"by the user running the program".format(
error.filename))
raise
else:
file_being_verified.close()
def _reset_file_comparison_data(self):
self._unique_records = dict()
self._matching_records = list()
# print self._unique_records
# print self._matching_records
def _compare_files(self):
"""
Identify unique and matching records from self._file_one and
self.file_two using various set operations.
"""
with open(self._file_one, 'rU') as file_one:
file_one_records = set(
FileComparisonPanda._load_file_into_memory(file_one))
with open(self._file_two, 'rU') as file_two:
file_two_records = set(
FileComparisonPanda._load_file_into_memory(file_two))
self._matching_records.extend(
file_one_records.intersection(file_two_records))
self._unique_records['file_one'] = list(
file_one_records.difference(file_two_records))
self._unique_records['file_two'] = list(
file_two_records.difference(file_one_records))
@staticmethod
def _load_file_into_memory(file_object):
"""
Load the contents of a CSV file into memory for faster
performance.
IMPORTANT: This creates the potential for the program
to bomb out when it encounters memory limits.
"""
csv_reader = csv.reader(file_object)
records = [tuple(record) for record in csv_reader]
return records
@property
def unique_records(self):
"""
Returns:
            A dict with keys 'file_one' and 'file_two', each a list of the
            records unique to that file; the comparison runs lazily on first access.
"""
if not self._unique_records:
self._compare_files()
return self._unique_records
@property
def matching_records(self):
"""
A list of records that were found in both files.
"""
if not self._matching_records:
self._compare_files()
return self._matching_records
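if __name__ == '__main__':
    # Hedged usage sketch: 'old.csv' and 'new.csv' are placeholder paths; any
    # two readable CSV files will do. Results are computed lazily on first
    # property access.
    panda = FileComparisonPanda('old.csv', 'new.csv')
    print('rows in both files: %d' % len(panda.matching_records))
    print('rows only in old.csv: %d' % len(panda.unique_records['file_one']))
    print('rows only in new.csv: %d' % len(panda.unique_records['file_two']))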
|
mit
| -991,835,789,728,919,200
| 30.595745
| 80
| 0.597407
| false
| 4.425484
| false
| false
| false
|
lych0317/CNBlogs_server
|
CNBlogs/Protocol/SearchBlogProtocol.py
|
1
|
2678
|
#!/usr/local/bin/python
# -*- coding:utf8 -*-
__author__ = 'liyc'
import urllib2
import re
from bs4 import BeautifulSoup
def search_blog_keyword_page(keyword, page="0"):
url = "http://zzk.cnblogs.com/s?t=b&dateMin=2013-01-01"
if keyword:
url = url + "&w=" + keyword
if page:
url = url + "&p=" + page
print url
req = urllib2.Request(url)
con = urllib2.urlopen(req)
doc = con.read()
con.close()
soup = BeautifulSoup(doc, 'html.parser')
searchItemArray = soup.find_all("div", attrs={"class": "searchItem"})
itemArray = []
for searchItem in searchItemArray:
item = {}
tag = searchItem.find(attrs={"class": "searchItemTitle"})
if tag:
href = tag.a.get("href")
pattern = re.compile("/")
match = pattern.split(href)[-1]
if match:
pattern = re.compile("\.")
match = pattern.split(match)[0]
if match:
pattern = re.compile("^\d*$")
match = pattern.match(match)
if match:
item["identifier"] = match.group()
else:
continue
item["link"] = href
tag = searchItem.find(attrs={"class": "searchItemTitle"})
if tag:
item["title"] = tag.a.text
tag = searchItem.find(attrs={"class": "searchCon"})
if tag:
item["summary"] = tag.text.strip()
tag = searchItem.find(attrs={"class": "searchItemInfo-userName"})
if tag:
author = {"uri": tag.a.get("href"), "name": tag.a.text, "avatar": ""}
item["author"] = author
tag = searchItem.find(attrs={"class": "searchItemInfo-publishDate"})
if tag:
item["publishDate"] = tag.text
item["updateDate"] = tag.text
pattern = re.compile("\d+")
tag = searchItem.find(attrs={"class": "searchItemInfo-good"})
if tag:
good = tag.text
match = pattern.search(good)
if match:
item["diggs"] = match.group()
tag = searchItem.find(attrs={"class": "searchItemInfo-comments"})
if tag:
comments = tag.text
match = pattern.search(comments)
if match:
item["comments"] = match.group()
tag = searchItem.find(attrs={"class": "searchItemInfo-views"})
if tag:
views = tag.text
match = pattern.search(views)
if match:
item["views"] = match.group()
itemArray.append(item)
return itemArray
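if __name__ == '__main__':
    # Hedged demo: fetch the first result page for a sample keyword and print
    # each hit's title; requires network access to zzk.cnblogs.com.
    for item in search_blog_keyword_page(u"python", "1"):
        print item.get("title", u"")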
|
apache-2.0
| -4,214,701,241,209,484,300
| 28.428571
| 81
| 0.509709
| false
| 4.00299
| false
| false
| false
|
herereadthis/django_project
|
polls/admin.py
|
1
|
1075
|
from django.contrib import admin
from polls.models import Choice, Poll
# Register your models here.
# customize the admin form by creating a model admin object, then pass it
# as the second argument to admin.site.register()
# inline: you can do TabularInline (saves space) or StackedInline
class ChoiceInline(admin.TabularInline):
# Choice Model
model = Choice
# add extra fields
extra = 3
class PollAdmin(admin.ModelAdmin):
fieldsets = [
(None,
{
'fields': ['question']
}),
('Date information',
{
'fields': ['pub_date'],
             # makes the fieldset auto-hide
'classes': ['collapse']
}),
]
# tells Django: choice objects are edited on the Poll admin page. By
    # default, provide enough fields for 3 choices.
inlines = [ChoiceInline]
list_display = ('question', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question']
admin.site.register(Poll, PollAdmin)
|
mit
| 1,060,910,523,673,767,200
| 28.054054
| 73
| 0.612093
| false
| 4.215686
| false
| false
| false
|
dantkz/spatial-transformer-tensorflow
|
example_affine.py
|
1
|
2113
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import AffineTransformer
import numpy as np
import scipy.misc
# Input image retrieved from:
# https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('data/cat.jpg')
im = im / 255.
im = im.astype('float32')
# input batch
batch_size = 4
batch = np.expand_dims(im, axis=0)
batch = np.tile(batch, [batch_size, 1, 1, 1])
# input placeholder
x = tf.placeholder(tf.float32, [batch_size, im.shape[0], im.shape[1], im.shape[2]])
# Let the output size of the affine transformer be quarter of the image size.
outsize = (int(im.shape[0]/4), int(im.shape[1]/4))
# Affine Transformation Layer
stl = AffineTransformer(outsize)
# Identity transformation parameters
initial = np.array([1.0, 0.0, 0.0,
0.0, 1.0, 0.0]).astype('float32')
initial = np.reshape(initial, [1, stl.param_dim])
# Run session
with tf.Session() as sess:
with tf.device("/cpu:0"):
with tf.variable_scope('spatial_transformer') as scope:
# Random jitter of the identity parameters
theta = initial + 0.1*tf.random_normal([batch_size, stl.param_dim])
result = stl.transform(x, theta)
sess.run(tf.global_variables_initializer())
result_ = sess.run(result, feed_dict={x: batch})
# save our result
for i in range(result_.shape[0]):
scipy.misc.imsave('affine' + str(i) + '.png', result_[i])
|
apache-2.0
| -6,119,487,510,610,097,000
| 34.216667
| 83
| 0.684808
| false
| 3.458265
| false
| false
| false
|
google/turbinia
|
turbinia/workers/fsstat.py
|
1
|
1589
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task to run fsstat on disk partitions."""
from __future__ import unicode_literals
import os
from turbinia import TurbiniaException
from turbinia.workers import TurbiniaTask
from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
class FsstatTask(TurbiniaTask):
REQUIRED_STATES = [state.ATTACHED]
def run(self, evidence, result):
"""Task to execute fsstat.
Args:
evidence (Evidence object): The evidence we will process.
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
fsstat_output = os.path.join(self.output_dir, 'fsstat.txt')
output_evidence = ReportText(source_path=fsstat_output)
cmd = ['sudo', 'fsstat', evidence.device_path]
result.log('Running fsstat as [{0!s}]'.format(cmd))
self.execute(
cmd, result, stdout_file=fsstat_output, new_evidence=[output_evidence],
close=True)
return result
|
apache-2.0
| -2,246,638,979,258,569,000
| 32.125
| 79
| 0.723096
| false
| 3.611364
| false
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/outputs/generic_s3_output.py
|
1
|
2304
|
from bitmovin.utils import Serializable
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import S3SignatureVersion
from . import AbstractOutput
class GenericS3Output(AbstractOutput, Serializable):
def __init__(self, access_key, secret_key, bucket_name, host, port=None, signature_version=None, ssl=None, id_=None, custom_data=None,
name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._signatureVersion = None
self.accessKey = access_key
self.secretKey = secret_key
self.bucketName = bucket_name
self.host = host
self.port = port
self.signatureVersion = signature_version
self.ssl = ssl
@property
def signatureVersion(self):
return self._signatureVersion
@signatureVersion.setter
def signatureVersion(self, new_sigver):
if new_sigver is None:
return
if isinstance(new_sigver, str):
self._signatureVersion = new_sigver
elif isinstance(new_sigver, S3SignatureVersion):
self._signatureVersion = new_sigver.value
else:
raise InvalidTypeError(
                'Invalid type {} for signatureVersion: must be either str or S3SignatureVersion!'.format(type(new_sigver)))
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
bucket_name = json_object['bucketName']
access_key = json_object.get('accessKey')
secret_key = json_object.get('secretKey')
name = json_object.get('name')
description = json_object.get('description')
host = json_object.get('host')
port = json_object.get('port')
signature_version = json_object.get('signatureVersion')
ssl = json_object.get('ssl')
generic_s3_output = GenericS3Output(
access_key=access_key, secret_key=secret_key, bucket_name=bucket_name, host=host, port=port, signature_version=signature_version,
ssl=ssl, id_=id_, name=name, description=description)
return generic_s3_output
def serialize(self):
serialized = super().serialize()
serialized['signatureVersion'] = self.signatureVersion
return serialized
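if __name__ == '__main__':
    # Hedged round-trip sketch: every value below is a placeholder, and the
    # signature_version string stands in for a member of S3SignatureVersion.
    example_output = GenericS3Output(
        access_key='ACCESS', secret_key='SECRET', bucket_name='my-bucket',
        host='s3.example.org', port=9000, signature_version='S3_V4',
        ssl=True, name='example generic S3 output')
    print(example_output.serialize())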
|
unlicense
| -5,036,721,332,386,152,000
| 38.724138
| 141
| 0.648003
| false
| 4.136445
| false
| false
| false
|
XiangyuQin/WeChat-Controller
|
code/testdecode.py
|
1
|
18959
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__all__ = ['WeChatController']
import cookielib
import urllib2
import urllib
import urlparse
import json
import poster
import hashlib
import time
import random
import sys
import os
import traceback
from cStringIO import StringIO
import tornado
import tornado.escape
from bs4 import BeautifulSoup
import uuid
from PIL import Image
#cookie = cookielib.MozillaCookieJar()
#_opener=poster.streaminghttp.register_openers()
reload(sys)
sys.setdefaultencoding( "utf-8" )
class WeChatControllerException(Exception):
pass
class WeChatController(object):
def __init__(self, user='tool', redisdb=None, force=False, config=None):
"""
        Initialize the WeChat Official Account (MP) platform client.
"""
self.test_appmsg = {
'author':'test',
'title':'test_merge2',
'sourceurl':'www.baidu.com',
'cover':'/home/pythonDir/cover.jpg',
'digest':"你好",
'content':'<p style="line-height: 25.6px; white-space: normal;"><em><span style="word-wrap: break-word; font-weight: 700;">表白,'\
'相信这个动作是每个人一生当中都会触发的一个行为,' \
'大部分人认为表白是跟女孩确定恋爱关系非常有效的一种方法,有耐心和妹子建立深层次的联系感并对妹子产生了吸引力,妹子接 ' \
'受的概念就会大增其实,盲目的表白你会发现往往到最后没有任何效果。</span></em></p><p style="line-height: 25.6px; white-spac' \
'e: normal;"><span style="line-height: 1.6;"> 有个朋友,做个一个实验,并把它录制成了一个视频剪辑,内容大概是这样的,他收集' \
'了现实生活中将近50个男人的表白现场实录,有的是在人民广场这样人流量聚焦的地区,有的像电影里那样是在很有格调的西餐厅,有的是在酒吧,有的是在朋 ' \
'友聚会,更有夸张一点的,你们可能都想不到,是在足球比赛的现场。</span></p><p style="line-height: 25.6px; white-space: normal;">最后的结果出来了,成功率几乎 '\
'只有5%不到,对的,你没看错,就是这么低。很多兄弟觉得不可思议,怎么会这样,和电视电影里的完全不一样啊,呵呵,因为这才是现实。为什么女人的反应都几乎是拒绝,完全不顾及' \
'男人的面子,也完全没有被感动的赶脚。</p><p style="line-height: 25.6px; white-space: normal;">那么我来告诉兄弟们,问题出在哪,因为这种情况下,女人会本能的产生一种压迫' \
'感,或者说是不安全感,她们会条件反射式的去拒绝。</p><p style="line-height: 25.6px; white-space: normal;">因为在进化学来看,远古时代的人类基因,仍然在现代人的基因里有' \
'保留,在古代的女人,她们一旦选定一个男人,她的生命就跟这个男人绑定在了一起,换句话说,如果这个男人不能提供足够的食物,这个女人在怀孕期间就会被饿死。</p><p style="lin' \
'e-height: 25.6px; white-space: normal;">这种选错了对象就要付出生命代价的基因一直延续至今,所以,女人一旦面对男人表白这种事情的时候,就会自动切换到理性思考模式,接受面临的是风险,而拒绝是最' \
'保险的做法,女人不傻,所以,她们会选择拒绝就不难理解了。<span style="line-height: 1.6;">现在兄弟们懂了这个道理,那有的兄弟要说了,既然这样,不去表白,怎么追到女人' \
',难道让女人对你表白么?恩,让女人表白也不是不可能的,我们家的方法就可以让你做到,让女人倒追你,我们有的是方法。</span></p><p style="line-height: 25.6px; white-s' \
'pace: normal;">这就是我们家自主开发的男神模式,它可以让你和女人的互动交流之后,让女人喜欢上你,让女人主动对你示好,对你表白。至于该怎么做,只需要关注我们' \
'的微信公众号,那里面有干货会告诉你。</p><p><br/></p>',
}
self.lastMsg = None
self._opener = None
self.user = user
self.key = "mp_%s" % user
self.ticket = None
self.ticket_id = None
self.token = None
self.email = "xjmjyqxy@sina.com"
self.password = "b3ca2251f5f48978f9e9c32aeb5fde26"
self.msgType = {'text': 1, 'image': 2, 'audio': 3, 'news': 10, 'video': 15}
self.login(force=force)
#print self.upload_img(img_url='/home/pythonDir/imagestest3.jpg')
self.add_appmsg(self.test_appmsg)
def _set_opener(self):
self._opener = poster.streaminghttp.register_openers()
self._opener.addheaders = [
('Accept', 'application/json, text/javascript, */*; q=0.01'),
('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8'),
('Referer', 'https://mp.weixin.qq.com'),
('Cache-Control', 'max-age=0'),
('Connection', 'keep-alive'),
('Host', 'mp.weixin.qq.com'),
('Origin', 'https://mp.weixin.qq.com'),
('X-Requested-With', 'XMLHttpRequest'),
('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36')
]
def login(self, force=False):
"""
        Log in to the WeChat MP platform; an exception is raised on failure. The token changes frequently, and after a successful login we must pause briefly, otherwise the cookie will be cleared.
"""
email = self.email
password = self.password
cookie = cookielib.MozillaCookieJar()
self._set_opener()
if force:
self._opener.add_handler(urllib2.HTTPCookieProcessor(cookie))
url = "https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN"
body = {'username': email, 'pwd': password, 'imgcode': '', 'f': 'json'}
req = self._opener.open(url, urllib.urlencode(body), timeout=30)
resp = req.read()
msg = json.loads(resp)
print msg
if 'base_resp' in msg and msg['base_resp']['ret'] == 0:
self.token = msg['redirect_url'].split("=")[-1]
print "token:%s" %self.token
else:
print 'login fail'
time.sleep(1)
else:
try:
print "force:%s" %force
except:
self.login(force=True)
def _ensure_login(self):
if not self.token==None:
self.check_notice()
def check_notice(self):
"""
        Fetch system notifications.
"""
url = "https://mp.weixin.qq.com/cgi-bin/sysnotify"
data = {
'count': 5,
'begin': 0,
'ajax': 1,
'random': random.random()
}
ret = self._send_request(url, data, method="GET")
return ret
def _get_ticket(self):
url = "https://mp.weixin.qq.com/cgi-bin/message"
data = {
't': 'message/list',
'count': 20,
'day': 0
}
ret = self._send_request(url, data, method="GET")
if ret:
ticket_id = ret['user_info']['user_name']
ticket = ret['base_resp']['media_ticket']
print ticket
print ticket_id
return ticket, ticket_id
else:
return None, None
def _send_request(self, url, data={}, headers={}, method='POST', jsonformat=True):
        msg = None
        for i in xrange(3):
try:
if method == "POST":
print isinstance(data, dict)
if(isinstance(data, dict)):
data.update({'f': 'json',
'lang': 'zh_CN',
'ajax': 1,
'token': self.token,
'random': random.random()})
if 't' not in data.keys():
data.update({'t': 'ajax-response'})
resp = self._opener.open(url, urllib.urlencode(data))
else:
req = urllib2.Request(url, data, headers)
resp = urllib2.urlopen(req)
else:
data.update({'token': self.token, 'f': 'json', 'lang': 'zh_CN'})
resp = self._opener.open(url + "?" + urllib.urlencode(data))
if resp.getcode() in [200, 302, 304]:
msg = resp.read()
break
except:
print traceback.format_exc()
time.sleep(1)
if not msg:
return False
self.lastMsg = msg
        # if the response is not JSON, return msg directly
print 'msg', msg, type(msg)
if jsonformat:
try:
msg = json.loads(msg)
except:
import chardet
msg = json.loads( msg.decode( chardet.detect(msg)['encoding'] ) )
else:
return msg
        # the result is JSON; check the send status (currently only two layouts)
if 'base_resp' in msg:
ret = int(msg['base_resp']['ret'])
else:
ret = int(msg['ret'])
        # inspect the returned status code
if ret == 0:
return msg
else:
time.sleep(1)
if ret == -3:
                # token expired, log in again
print "token expired, relogin"
self.login(force=True)
return self._send_request(url, data, headers, method, jsonformat)
elif ret == -18:
                # ticket expired, fetch a new one
                self.ticket, self.ticket_id = self._get_ticket()
print "ticket expired,reget"
return self._send_request(url, data, headers, method, jsonformat)
else:
#error
print str(msg)
return False
def upload_img(self, img_url=""):
self._ensure_login()
ticket, ticket_id = self._get_ticket()
if not ticket:
return False
url = 'https://mp.weixin.qq.com/cgi-bin/filetransfer?action=upload_material&f=json' \
'&writetype=doublewrite&groupid=1&ticket_id={0}&ticket={1}&token={2}&lang=zh_CN'.format(
ticket_id,
ticket,
self.token)
params = {'file': open(img_url, "rb")}
data, headers = poster.encode.multipart_encode(params)
headers.update({
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
'Connection': 'keep-alive',
'Host': 'mp.weixin.qq.com',
'Origin': 'https://mp.weixin.qq.com',
'Referer': 'https://mp.weixin.qq.com/cgi-bin/filepage?type=2&begin=0&count=10&t=media/list&token=%s&lang=zh_CN' % self.token,
})
ret = self._send_request(url, data, headers)
if ret:
return ret['content']
else:
print ret
return False
def merge_appmsg_info(self, appMsg, index):
"""
        Build the submission payload from the article (appmsg) information.
"""
soup = BeautifulSoup(appMsg.get('content', ''), 'html5lib')
imgs = soup.find_all('img')
for img in imgs:
url = img.get('src', img.get('data-src'))
if not url:
continue
if urlparse.urlparse(url).netloc == 'mmbiz.qlogo.cn':
continue
data = urllib2.urlopen(url).read()
im = Image.open(StringIO(data))
width = im.size[0]
ratio = im.size[1]/float(width)
filename = '/tmp/%s.%s' % (uuid.uuid4().hex, im.format.lower())
with open(filename, 'wb') as fp:
fp.write(data)
src = self.uploadAppImg(filename)
os.remove(filename)
if src:
img.attrs['src'] = src
img.attrs['data-src'] = src
img.attrs['data-ratio'] = ratio
img.attrs['data-w'] = width
appMsg['content'] = soup.body.renderContents()
        # when using get_appmsg, the returned fields use file_id for fileid and source_url for sourceurl
return {
'title%d' % index: tornado.escape.xhtml_unescape(appMsg.get('title', '')),
'content%d' % index: tornado.escape.xhtml_unescape(appMsg.get('content', '')),
'digest%d' % index: tornado.escape.xhtml_unescape(appMsg.get('digest', '')),
'author%d' % index: tornado.escape.xhtml_unescape(appMsg.get('author', '')),
'fileid%d' % index: appMsg.get('file_id', appMsg.get('fileid', '')),
'sourceurl%d' % index: appMsg.get('source_url', appMsg.get('sourceurl', '')),
'show_cover_pic%d' % index: appMsg.get('show_cover_pic', 0),
'shortvideofileid%d' % index: appMsg.get('shortvideofileid', ''),
'copyright_type%d' % index: appMsg.get('copyright_type', 0),
'can_reward%d' % index: appMsg.get('can_reward', 0),
'reward_wording%d' % index: appMsg.get('reward_wording', ''),
'releasefirst%d' % index: appMsg.get('releasefirst', 0),
'can_reward%d' % index: appMsg.get('can_reward', 0),
'reward_wording%d' % index: appMsg.get('reward_wording', ''),
'reprint_permit_type%d' % index: appMsg.get('reprint_permit_type', 0),
'original_article_type%d' % index: appMsg.get('original_article_type', ''),
'need_open_comment%d' % index: appMsg.get('need_open_comment', 1),
}
def packet_appmsg(self, appMsgs):
"""
        Pack the articles into the request payload.
"""
ret = {}
if isinstance(appMsgs, dict):
appMsgs = [appMsgs]
for index in xrange(len(appMsgs)):
appMsg = appMsgs[index]
if not appMsg.get('file_id', None):
if not (appMsg.get('title') and appMsg.get('content') and appMsg.get('cover')):
                    print "A title, content and a cover image are all required"
continue
file_id = self.upload_img(appMsg['cover'])
appMsg['file_id'] = file_id
ret.update(self.merge_appmsg_info(appMsg, index))
return ret
def get_appmsg(self, AppMsgId, isMul=0):
"""
        Get the article (appmsg) information for the given AppMsgId.
        isMul indicates whether it is a multi-article message.
        Returns the article information in appMsg format.
"""
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {
'appmsgid': AppMsgId,
'isMul': isMul,
'type': 10,
't': 'media/appmsg_edit',
'action': 'edit'
}
ret = self._send_request(url, data, method="GET")
if ret:
app_msgs = json.loads(ret['app_msg_info'])['item'][0]['multi_item']
return app_msgs
def add_appmsg(self, appMsgs, AppMsgId=''):
"""
        If AppMsgId is empty, a new article is created; otherwise the article is saved after preview.
        Each appMsg contains: cover image img (required), title, content, digest,
        show_cover flag, author and sourceurl; appMsgs is a list whose members are dicts.
        Returns the id of this article.
"""
url = 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg'
data = {
'AppMsgId': AppMsgId,
'count': len(appMsgs) if isinstance(appMsgs, list) else 1,
'sub': 'update' if AppMsgId else 'create',
'type': 10
}
data.update(self.packet_appmsg(appMsgs))
ret = self._send_request(url, data)
if ret:
if AppMsgId:
return AppMsgId
else:
msgIds = self.get_msg_Ids()
if msgIds and len(msgIds):
return msgIds[0]
return False
def del_appmsg(self, AppMsgId):
"""
        Delete an article by its id.
"""
url = 'https://mp.weixin.qq.com/cgi-bin/operate_appmsg'
data = {
'AppMsgId': AppMsgId,
'sub': 'del'
}
        ret = self._send_request(url, data)
if ret:
return True
else:
print ret
return False
def send_appmsg_by_id(self, sendTo, AppMsgId):
"""
        Send an article by its AppMsgId.
"""
ret = self._sendMsg(sendTo, {
'type': 10,
'app_id': AppMsgId,
'appmsgid': AppMsgId
})
return ret
def send_app_msg(self, sendTo, appMsgs, delete=True):
"""
        Proactively push articles to a user.
"""
        AppMsgId = self.add_appmsg(appMsgs)
        if not AppMsgId:
            return False
        ret = self.send_appmsg_by_id(sendTo, AppMsgId)
        if delete:
            self.del_appmsg(AppMsgId)
return ret
def get_msg_Ids(self, msgType='news', begin=0, count=None, detail=False):
"""
        Get material IDs; msgType is one of 'news', 'image', 'audio', 'video'.
"""
if msgType == 'news':
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {'t': 'media/appmsg_list2',
'action': 'list_card',
'count': count or 10}
elif msgType == 'video':
url = "https://mp.weixin.qq.com/cgi-bin/appmsg"
data = {'t': 'media/appmsg_list',
'action': 'list',
'count': count or 9}
elif msgType == 'image':
url = "https://mp.weixin.qq.com/cgi-bin/filepage"
data = {'1': 1,
't': 'media/img_list',
'count': count or 12}
else:
url = "https://mp.weixin.qq.com/cgi-bin/filepage"
data = {'t': 'media/list',
'count': count or 21}
data.update({
'type': self.msgType[msgType],
'begin': begin,
})
ret = self._send_request(url, data, method="GET")
if ret:
if msgType in ['news', 'video']:
msgs = ret['app_msg_info']['item']
ids = [item['app_id'] for item in msgs]
else:
msgs = ret['page_info']['file_item']
ids = [item['file_id'] for item in msgs]
if detail:
return msgs
else:
return ids
else:
return False
if __name__ == "__main__":
client = WeChatController(user='weChatController',force=True)
msg = client.check_notice()
print msg
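    # Hedged continuation of the demo: list recently saved articles and fetch
    # the first one if any exist. Requires the login above to have succeeded.
    news_ids = client.get_msg_Ids(msgType='news')
    print news_ids
    if news_ids:
        print client.get_appmsg(news_ids[0])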
|
apache-2.0
| -3,363,681,692,529,769,500
| 36.066225
| 140
| 0.510631
| false
| 2.821543
| false
| false
| false
|
jaeilepp/eggie
|
mne/io/kit/kit.py
|
2
|
28437
|
"""Conversion tool from SQD to FIF
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py
"""
# Author: Teon Brooks <teon@nyu.edu>
#
# License: BSD (3-clause)
import os
from os import SEEK_CUR
from struct import unpack
import time
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...coreg import (read_elp, fit_matched_points, _decimate_points,
get_ras_to_neuromag_trans)
from ...utils import verbose, logger
from ...transforms import apply_trans, als_ras_trans, als_ras_trans_mm
from ..base import _BaseRaw
from ..constants import FIFF
from ..meas_info import Info
from ..tag import _loc_to_trans
from .constants import KIT, KIT_NY, KIT_AD
from .coreg import read_hsp, read_mrk
from ...externals.six import string_types
class RawKIT(_BaseRaw):
"""Raw object from KIT SQD file adapted from bti/raw.py
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, verbose=None):
logger.info('Extracting SQD Parameters from %s...' % input_fname)
input_fname = os.path.abspath(input_fname)
self._sqd_params = get_sqd_params(input_fname)
self._sqd_params['stimthresh'] = stimthresh
self._sqd_params['fname'] = input_fname
logger.info('Creating Raw.info structure...')
# Raw attributes
self.verbose = verbose
self.preload = False
self._projector = None
self.first_samp = 0
self.last_samp = self._sqd_params['nsamples'] - 1
self.comp = None # no compensation for KIT
self.proj = False
# Create raw.info dict for raw fif object with SQD data
self.info = Info()
self.info['meas_id'] = None
self.info['file_id'] = None
self.info['meas_date'] = int(time.time())
self.info['projs'] = []
self.info['comps'] = []
self.info['lowpass'] = self._sqd_params['lowpass']
self.info['highpass'] = self._sqd_params['highpass']
self.info['sfreq'] = float(self._sqd_params['sfreq'])
# meg channels plus synthetic channel
self.info['nchan'] = self._sqd_params['nchan'] + 1
self.info['bads'] = []
self.info['acq_pars'], self.info['acq_stim'] = None, None
self.info['filename'] = None
self.info['ctf_head_t'] = None
self.info['dev_ctf_t'] = []
self._filenames = []
self.info['dig'] = None
self.info['dev_head_t'] = None
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, string_types)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
if (mrk is not None and elp is not None and hsp is not None):
self._set_dig_kit(mrk, elp, hsp)
elif (mrk is not None or elp is not None or hsp is not None):
err = ("mrk, elp and hsp need to be provided as a group (all or "
"none)")
raise ValueError(err)
# Creates a list of dicts of meg channels for raw.info
logger.info('Setting channel info structure...')
ch_names = {}
ch_names['MEG'] = ['MEG %03d' % ch for ch
in range(1, self._sqd_params['n_sens'] + 1)]
ch_names['MISC'] = ['MISC %03d' % ch for ch
in range(1, self._sqd_params['nmiscchan'] + 1)]
ch_names['STIM'] = ['STI 014']
locs = self._sqd_params['sensor_locs']
chan_locs = apply_trans(als_ras_trans, locs[:, :3])
chan_angles = locs[:, 3:]
self.info['chs'] = []
for idx, ch_info in enumerate(zip(ch_names['MEG'], chan_locs,
chan_angles), 1):
ch_name, ch_loc, ch_angles = ch_info
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idx
chan_info['scanno'] = idx
chan_info['range'] = KIT.RANGE
chan_info['unit_mul'] = KIT.UNIT_MUL
chan_info['ch_name'] = ch_name
chan_info['unit'] = FIFF.FIFF_UNIT_T
chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
if idx <= self._sqd_params['nmegchan']:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_GRAD
chan_info['kind'] = FIFF.FIFFV_MEG_CH
else:
chan_info['coil_type'] = FIFF.FIFFV_COIL_KIT_REF_MAG
chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
chan_info['eeg_loc'] = None
# create three orthogonal vector
# ch_angles[0]: theta, ch_angles[1]: phi
ch_angles = np.radians(ch_angles)
x = np.sin(ch_angles[0]) * np.cos(ch_angles[1])
y = np.sin(ch_angles[0]) * np.sin(ch_angles[1])
z = np.cos(ch_angles[0])
vec_z = np.array([x, y, z])
length = linalg.norm(vec_z)
vec_z /= length
vec_x = np.zeros(vec_z.size, dtype=np.float)
if vec_z[1] < vec_z[2]:
if vec_z[0] < vec_z[1]:
vec_x[0] = 1.0
else:
vec_x[1] = 1.0
elif vec_z[0] < vec_z[2]:
vec_x[0] = 1.0
else:
vec_x[2] = 1.0
vec_x -= np.sum(vec_x * vec_z) * vec_z
length = linalg.norm(vec_x)
vec_x /= length
vec_y = np.cross(vec_z, vec_x)
# transform to Neuromag like coordinate space
vecs = np.vstack((vec_x, vec_y, vec_z))
vecs = apply_trans(als_ras_trans, vecs)
chan_info['loc'] = np.vstack((ch_loc, vecs)).ravel()
chan_info['coil_trans'] = _loc_to_trans(chan_info['loc'])
self.info['chs'].append(chan_info)
# label trigger and misc channels
for idy, ch_name in enumerate(ch_names['MISC'] + ch_names['STIM'],
self._sqd_params['n_sens']):
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
chan_info['logno'] = idy
chan_info['scanno'] = idy
chan_info['range'] = 1.0
chan_info['unit'] = FIFF.FIFF_UNIT_V
chan_info['unit_mul'] = 0 # default is 0 mne_manual p.273
chan_info['ch_name'] = ch_name
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['loc'] = np.zeros(12)
if ch_name.startswith('STI'):
chan_info['unit'] = FIFF.FIFF_UNIT_NONE
chan_info['kind'] = FIFF.FIFFV_STIM_CH
else:
chan_info['kind'] = FIFF.FIFFV_MISC_CH
self.info['chs'].append(chan_info)
self.info['ch_names'] = (ch_names['MEG'] + ch_names['MISC'] +
ch_names['STIM'])
self._set_stimchannels(stim, slope)
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data, _ = self._read_segment()
assert len(self._data) == self.info['nchan']
# Create a synthetic channel
stim = self._sqd_params['stim']
trig_chs = self._data[stim, :]
if slope == '+':
trig_chs = trig_chs > stimthresh
elif slope == '-':
trig_chs = trig_chs < stimthresh
else:
raise ValueError("slope needs to be '+' or '-'")
trig_vals = np.array(2 ** np.arange(len(stim)), ndmin=2).T
trig_chs = trig_chs * trig_vals
stim_ch = trig_chs.sum(axis=0)
self._data[-1, :] = stim_ch
# Add time info
self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
self._times = np.arange(self.first_samp, self.last_samp + 1,
dtype=np.float64)
self._times /= self.info['sfreq']
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
% (self.first_samp, self.last_samp,
float(self.first_samp) / self.info['sfreq'],
float(self.last_samp) / self.info['sfreq']))
logger.info('Ready.')
def __repr__(self):
s = ('%r' % os.path.basename(self._sqd_params['fname']),
"n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
self.last_samp -
self.first_samp + 1))
return "<RawKIT | %s>" % ', '.join(s)
def read_stim_ch(self, buffer_size=1e5):
"""Read events from data
        Parameters
        ----------
buffer_size : int
            The size of the chunks by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
"""
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x, _ = self._read_segment(start=b_start, stop=b_stop, sel=pick)
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
projector=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
"""
if sel is None:
sel = list(range(self.info['nchan']))
elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
return (666, 666)
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
# Initial checks
start = int(start)
stop = int(stop)
if start >= stop:
raise ValueError('No data in this range')
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(self.info['sfreq']),
(stop - 1) / float(self.info['sfreq'])))
with open(self._sqd_params['fname'], 'rb', buffering=0) as fid:
# extract data
fid.seek(KIT.DATA_OFFSET)
# data offset info
data_offset = unpack('i', fid.read(KIT.INT))[0]
nchan = self._sqd_params['nchan']
buffer_size = stop - start
count = buffer_size * nchan
pointer = start * nchan * KIT.SHORT
fid.seek(data_offset + pointer)
data = np.fromfile(fid, dtype='h', count=count)
data = data.reshape((buffer_size, nchan))
# amplifier applies only to the sensor channels
n_sens = self._sqd_params['n_sens']
sensor_gain = np.copy(self._sqd_params['sensor_gain'])
sensor_gain[:n_sens] = (sensor_gain[:n_sens] /
self._sqd_params['amp_gain'])
conv_factor = np.array((KIT.VOLTAGE_RANGE /
self._sqd_params['DYNAMIC_RANGE'])
* sensor_gain, ndmin=2)
data = conv_factor * data
data = data.T
# Create a synthetic channel
trig_chs = data[self._sqd_params['stim'], :]
if self._sqd_params['slope'] == '+':
trig_chs = trig_chs > self._sqd_params['stimthresh']
elif self._sqd_params['slope'] == '-':
trig_chs = trig_chs < self._sqd_params['stimthresh']
else:
raise ValueError("slope needs to be '+' or '-'")
trig_vals = np.array(2 ** np.arange(len(self._sqd_params['stim'])),
ndmin=2).T
trig_chs = trig_chs * trig_vals
stim_ch = np.array(trig_chs.sum(axis=0), ndmin=2)
data = np.vstack((data, stim_ch))
data = data[sel]
logger.info('[done]')
times = np.arange(start, stop) / self.info['sfreq']
return data, times
def _set_dig_kit(self, mrk, elp, hsp, auto_decimate=True):
"""Add landmark points and head shape data to the RawKIT instance
Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
ALS coordinate system.
Parameters
----------
mrk : None | str | array_like, shape = (5, 3)
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more
            than 10,000 points are in the head shape, they are automatically
decimated.
auto_decimate : bool
Decimate hsp points for head shape files with more than 10'000
points.
"""
if isinstance(hsp, string_types):
hsp = read_hsp(hsp)
n_pts = len(hsp)
if n_pts > KIT.DIG_POINTS:
hsp = _decimate_points(hsp, 5)
n_new = len(hsp)
msg = ("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
"downsampled to {n_new} points. The preferred way to "
"downsample is using FastScan.")
msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
logger.warning(msg)
if isinstance(elp, string_types):
elp_points = read_elp(elp)[:8]
            if len(elp_points) < 8:
err = ("File %r contains fewer than 8 points; got shape "
"%s." % (elp, elp_points.shape))
raise ValueError(err)
elp = elp_points
if isinstance(mrk, string_types):
mrk = read_mrk(mrk)
hsp = apply_trans(als_ras_trans_mm, hsp)
elp = apply_trans(als_ras_trans_mm, elp)
mrk = apply_trans(als_ras_trans, mrk)
nasion, lpa, rpa = elp[:3]
nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
elp = apply_trans(nmtrans, elp)
hsp = apply_trans(nmtrans, hsp)
# device head transform
trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
self._set_dig_neuromag(elp[:3], elp[3:], hsp, trans)
def _set_dig_neuromag(self, fid, elp, hsp, trans):
"""Fill in the digitizer data using points in neuromag space
Parameters
----------
fid : array, shape = (3, 3)
Digitizer fiducials.
elp : array, shape = (5, 3)
Digitizer ELP points.
hsp : array, shape = (n_points, 3)
Head shape points.
trans : None | array, shape = (4, 4)
Device head transformation.
"""
trans = np.asarray(trans)
if fid.shape != (3, 3):
raise ValueError("fid needs to be a 3 by 3 array")
if elp.shape != (5, 3):
raise ValueError("elp needs to be a 5 by 3 array")
if trans.shape != (4, 4):
raise ValueError("trans needs to be 4 by 4 array")
nasion, lpa, rpa = fid
dig = [{'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD},
{'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD},
{'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,
'kind': FIFF.FIFFV_POINT_CARDINAL,
'coord_frame': FIFF.FIFFV_COORD_HEAD}]
for idx, point in enumerate(elp):
dig.append({'r': point, 'ident': idx, 'kind': FIFF.FIFFV_POINT_HPI,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
for idx, point in enumerate(hsp):
dig.append({'r': point, 'ident': idx,
'kind': FIFF.FIFFV_POINT_EXTRA,
'coord_frame': FIFF.FIFFV_COORD_HEAD})
dev_head_t = {'from': FIFF.FIFFV_COORD_DEVICE,
'to': FIFF.FIFFV_COORD_HEAD, 'trans': trans}
self.info['dig'] = dig
self.info['dev_head_t'] = dev_head_t
def _set_stimchannels(self, stim='<', slope='-'):
"""Specify how the trigger channel is synthesized form analog channels.
Has to be done before loading data. For a RawKIT instance that has been
created with preload=True, this method will raise a
NotImplementedError.
Parameters
----------
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
misc channels will be used with specified directionality.
'<' means that largest values assigned to the first channel
in sequence.
'>' means the largest trigger assigned to the last channel
in sequence.
slope : '+' | '-'
'+' means a positive slope (low-to-high) on the event channel(s)
is used to trigger an event.
'-' means a negative slope (high-to-low) on the event channel(s)
is used to trigger an event.
"""
if self.preload:
err = "Can't change stim channel after preloading data"
raise NotImplementedError(err)
self._sqd_params['slope'] = slope
if isinstance(stim, str):
picks = pick_types(self.info, meg=False, ref_meg=False,
misc=True, exclude=[])[:8]
if stim == '<':
stim = picks[::-1]
elif stim == '>':
stim = picks
else:
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
elif np.max(stim) >= self._sqd_params['nchan']:
msg = ("Tried to set stim channel %i, but squid file only has %i"
" channels" % (np.max(stim), self._sqd_params['nchan']))
raise ValueError(msg)
self._sqd_params['stim'] = stim
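# Hedged usage sketch (kept as a comment so nothing runs on import): with
# hypothetical file names, a KIT recording would typically be opened as
#
#     raw = RawKIT('data.sqd', mrk='marker.sqd', elp='points.elp',
#                  hsp='headshape.hsp', stim='>', slope='-', preload=True)
#     stim = raw.read_stim_ch()
#
# mrk, elp and hsp must be given together (or all omitted), as enforced in
# RawKIT.__init__ above.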
def get_sqd_params(rawfile):
"""Extracts all the information from the sqd file.
Parameters
----------
rawfile : str
Raw sqd file to be read.
Returns
-------
sqd : dict
A dict containing all the sqd parameter settings.
"""
sqd = dict()
sqd['rawfile'] = rawfile
with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug
fid.seek(KIT.BASIC_INFO)
basic_offset = unpack('i', fid.read(KIT.INT))[0]
fid.seek(basic_offset)
# skips version, revision, sysid
fid.seek(KIT.INT * 3, SEEK_CUR)
# basic info
sysname = unpack('128s', fid.read(KIT.STRING))
sysname = sysname[0].decode().split('\n')[0]
fid.seek(KIT.STRING, SEEK_CUR) # skips modelname
sqd['nchan'] = unpack('i', fid.read(KIT.INT))[0]
if sysname == 'New York University Abu Dhabi':
KIT_SYS = KIT_AD
elif sysname == 'NYU 160ch System since Jan24 2009':
KIT_SYS = KIT_NY
else:
raise NotImplementedError
# channel locations
fid.seek(KIT_SYS.CHAN_LOC_OFFSET)
chan_offset = unpack('i', fid.read(KIT.INT))[0]
chan_size = unpack('i', fid.read(KIT.INT))[0]
fid.seek(chan_offset)
sensors = []
for i in range(KIT_SYS.N_SENS):
fid.seek(chan_offset + chan_size * i)
sens_type = unpack('i', fid.read(KIT.INT))[0]
if sens_type == 1:
# magnetometer
# x,y,z,theta,phi,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=6))
elif sens_type == 2:
# axialgradiometer
# x,y,z,theta,phi,baseline,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=7))
elif sens_type == 3:
# planargradiometer
# x,y,z,theta,phi,btheta,bphi,baseline,coilsize
sensors.append(np.fromfile(fid, dtype='d', count=9))
elif sens_type == 257:
# reference channels
sensors.append(np.zeros(7))
sqd['i'] = sens_type
sqd['sensor_locs'] = np.array(sensors)
# amplifier gain
fid.seek(KIT_SYS.AMPLIFIER_INFO)
amp_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(amp_offset)
amp_data = unpack('i', fid.read(KIT_SYS.INT))[0]
gain1 = KIT_SYS.GAINS[(KIT_SYS.GAIN1_MASK & amp_data)
>> KIT_SYS.GAIN1_BIT]
gain2 = KIT_SYS.GAINS[(KIT_SYS.GAIN2_MASK & amp_data)
>> KIT_SYS.GAIN2_BIT]
if KIT_SYS.GAIN3_BIT:
gain3 = KIT_SYS.GAINS[(KIT_SYS.GAIN3_MASK & amp_data)
>> KIT_SYS.GAIN3_BIT]
sqd['amp_gain'] = gain1 * gain2 * gain3
else:
sqd['amp_gain'] = gain1 * gain2
# filter settings
sqd['lowpass'] = KIT_SYS.LPFS[(KIT_SYS.LPF_MASK & amp_data)
>> KIT_SYS.LPF_BIT]
sqd['highpass'] = KIT_SYS.HPFS[(KIT_SYS.HPF_MASK & amp_data)
>> KIT_SYS.HPF_BIT]
sqd['notch'] = KIT_SYS.BEFS[(KIT_SYS.BEF_MASK & amp_data)
>> KIT_SYS.BEF_BIT]
        # only sensor channels require gain. the additional misc channels
# (trigger channels, audio and voice channels) are passed
# through unaffected
fid.seek(KIT_SYS.CHAN_SENS)
sens_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(sens_offset)
sens = np.fromfile(fid, dtype='d', count=sqd['nchan'] * 2)
sensitivities = (np.reshape(sens, (sqd['nchan'], 2))
[:KIT_SYS.N_SENS, 1])
sqd['sensor_gain'] = np.ones(KIT_SYS.NCHAN)
sqd['sensor_gain'][:KIT_SYS.N_SENS] = sensitivities
fid.seek(KIT_SYS.SAMPLE_INFO)
acqcond_offset = unpack('i', fid.read(KIT_SYS.INT))[0]
fid.seek(acqcond_offset)
acq_type = unpack('i', fid.read(KIT_SYS.INT))[0]
if acq_type == 1:
sqd['sfreq'] = unpack('d', fid.read(KIT_SYS.DOUBLE))[0]
_ = fid.read(KIT_SYS.INT) # initialized estimate of samples
sqd['nsamples'] = unpack('i', fid.read(KIT_SYS.INT))[0]
else:
err = ("You are probably trying to load a file that is not a "
"continuous recording sqd file.")
raise ValueError(err)
sqd['n_sens'] = KIT_SYS.N_SENS
sqd['nmegchan'] = KIT_SYS.NMEGCHAN
sqd['nmiscchan'] = KIT_SYS.NMISCCHAN
sqd['DYNAMIC_RANGE'] = KIT_SYS.DYNAMIC_RANGE
return sqd
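# Illustrative note (not part of the original source): the dict returned above
# carries, among others, 'nchan', 'sfreq', 'nsamples', 'sensor_gain', 'amp_gain'
# and the filter settings 'lowpass', 'highpass' and 'notch', all read straight
# from the binary sqd header.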
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, verbose=None):
"""Reader function for KIT conversion to FIF
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
        Neuromag-style stim channel. For '<', the largest values are assigned
        to the first channel. For '>' (the default), the largest values are
        assigned to the last channel. Can also be specified as a list of
        trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
stim=stim, slope=slope, stimthresh=stimthresh,
preload=preload, verbose=verbose)
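# Hedged usage sketch (file names below are placeholders, not from the original
# source): read a KIT recording together with its marker, digitizer and head
# shape files, keeping the default trigger-channel synthesis.
#
# raw = read_raw_kit('data.sqd', mrk='marker.sqd', elp='points.elp',
#                    hsp='headshape.hsp', stim='>', slope='-', preload=True)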
|
bsd-2-clause
| 406,977,401,301,382,140
| 40.574561
| 79
| 0.540001
| false
| 3.63041
| false
| false
| false
|
JackWalpole/finite_strain
|
ellipsoid_visualisation.py
|
1
|
1324
|
#!/usr/bin/env python
"""Visualise finite strian ellipsoids"""
import numpy as np
from mayavi.api import Engine
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
from mayavi import mlab
def gen_ellipsoid(position,shape,orientation):
"""given the existence of a scene generate ellipsoid"""
surface = Surface()
source.add_module(surface)
actor = surface.actor
actor.property.opacity = 0.5
actor.property.color = tuple(np.random.rand(3))
actor.mapper.scalar_visibility = False
actor.property.backface_culling = True
actor.actor.orientation = orientation
actor.actor.origin = np.zeros(3)
actor.actor.position = position
actor.actor.scale = shape
return surface
engine = Engine()
engine.start()
scene = engine.new_scene()
# scene.scene.disable_render = True
source = ParametricSurface()
source.function = 'ellipsoid'
engine.add_source(source)
# start with a sphere
surface = gen_ellipsoid(np.zeros(3),np.ones(3),np.zeros(3))
for ii in range(100):
print ii
surface.actor.actor.set(scale = [1 + ii*.2,1,1])
# surfaces = []
# for ii in range(10):
# surfaces.append(gen_ellipsoid(np.random.rand(3),np.random.rand(3),np.random.rand(3)*360))
# scene.scene.disable_render = False
# mlab.show()
|
mit
| -7,504,901,609,433,819,000
| 23.090909
| 95
| 0.699396
| false
| 3.31
| false
| false
| false
|
fabiocorneti/django-easytree
|
easytree/forms.py
|
1
|
4661
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from easytree import utils
from easytree.exceptions import EasyTreeException
pos_map = {
'first-sibling': _('First sibling'),
'left': _('Previous sibling'),
'right': _('Next sibling'),
'last-sibling': _('Last sibling'),
'sorted-sibling': _('Sorted sibling'),
'first-child': _('First child'),
'last-child': _('Last child'),
'sorted-child': _('Sorted child')
}
class EasyTreeModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return u'%s %s' % (
u'>>>' * ((obj.depth or 1) -1),
super(EasyTreeModelChoiceField, self).label_from_instance(obj)
)
class BaseEasyTreeForm(forms.ModelForm):
toplevel_model_cache = None
def get_toplevel_model(self):
if not self.toplevel_model_cache:
self.toplevel_model_cache = utils.get_toplevel_model(self._meta.model)
return self.toplevel_model_cache
toplevel_model = property(get_toplevel_model)
def __init__(self, *args, **kwargs):
super(BaseEasyTreeForm, self).__init__(*args, **kwargs)
raw_relative_to = getattr(self.instance._easytree_meta, 'raw_relative_to', False)
choice_field_kwargs = {
'queryset': self.toplevel_model.objects.order_by('tree_id', 'lft'),
'required': False,
'label': _("Relative to %(modelname)s") % {'modelname': self.toplevel_model._meta.verbose_name}
}
if raw_relative_to:
choice_field_kwargs['widget'] = forms.TextInput
self.fields['relative_to'] = EasyTreeModelChoiceField(**choice_field_kwargs)
max_depth = getattr(self.instance._easytree_meta, 'max_depth', 0)
if max_depth == 1:
relative_positions_choices = ('left', 'right', 'first-sibling', 'last-sibling')
elif getattr(self.instance, 'node_order_by', None):
relative_positions_choices = ('sorted-sibling', 'sorted-child')
else:
relative_positions_choices = [k for k in pos_map.keys() if k not in ('sorted-sibling', 'sorted-child')]
self.fields['relative_position'] = forms.ChoiceField(
required=False,
choices=[('','-------')] + [(k, v) for k, v in pos_map.items() if k in relative_positions_choices],
label=_("Relative position")
)
def clean(self):
cleaned_data = self.cleaned_data
model = self.toplevel_model
relative_to = cleaned_data.get('relative_to')
relative_position = cleaned_data.get('relative_position')
if not self.instance.pk:
if not relative_to:
try:
model.objects.validate_root(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
if relative_position in ('last-child', 'first-child', 'sorted-child'):
try:
model.objects.validate_child(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
try:
model.objects.validate_sibling(None, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
else:
if relative_to:
try:
model.objects.validate_move(self.instance, relative_to, pos=relative_position, cleaned_data=cleaned_data)
except EasyTreeException, e:
raise forms.ValidationError, e.message
cleaned_data['relative_to'] = relative_to
return cleaned_data
def save(self, **kwargs):
instance = super(BaseEasyTreeForm, self).save(commit=False)
relative_to = self.cleaned_data.get('relative_to', None)
relative_position = self.cleaned_data.get('relative_position')
if relative_to:
instance.easytree_relative_position = relative_position
instance.easytree_relative_to = relative_to
if kwargs.get('commit', False):
instance.save()
return instance
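# Hedged usage sketch (not part of the original module): a concrete form for a
# hypothetical easytree-managed model called Page; the model name is an
# assumption used only for illustration. The relative_to/relative_position
# fields are added automatically by BaseEasyTreeForm.__init__.
#
# class PageForm(BaseEasyTreeForm):
#     class Meta:
#         model = Page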
|
bsd-3-clause
| -2,380,818,350,016,450,000
| 39.181034
| 125
| 0.566831
| false
| 4.380639
| false
| false
| false
|
meithan/NFL_website
|
cgi-bin/QueryPage.py
|
1
|
4651
|
#!/usr/bin/env python
# Convenience script to issue MySQL queries to the DB
import cgi, cgitb
import sys
cgitb.enable()
from common import *
# =====================================
def showBadLogin():
print '<br><strong>You must have the proper credentials to access this page!</strong>'
print '</body></html>'
# =====================================
def showGoodPage():
# Get query string, if present
formdata = cgi.FieldStorage()
if formdata.has_key("Query"):
queryStr = formdata["Query"].value
else:
queryStr = ""
responseStr = ""
# =======================================
# Send query to DB, obtain response
if queryStr != "":
# Import MySQLdb module
library_loaded = False
responseStr += "Loading MySQLdb ..."
try:
sys.path.append('/home/meithanx/mysql')
import MySQLdb
library_loaded = True
except:
responseStr += "\nCouldn't load MySQLdb!\n"
if library_loaded:
responseStr += " Loaded.\n"
# Connect to DB
connected = False
responseStr += "Establishing connection to DB %s ..." % (DB_NAME)
try:
db = MySQLdb.connect("localhost","meithanx_sql","foobar",DB_NAME)
dbcursor = db.cursor()
connected = True
except:
responseStr += "Couldn't connect to DB %s!!\n" % (DB_NAME)
if connected:
responseStr += " Connected.\n"
responseStr += "===============================\n\n"
responseStr += "> %s\n\n" % (queryStr)
query = queryStr.strip()
dbcursor.execute(query)
db.commit()
rows_affected = dbcursor.rowcount
rowset = dbcursor.fetchall()
if len(rowset)==0:
responseStr += repr(rowset) + "\n"
for row in rowset:
responseStr += repr(row) + "\n"
responseStr += "%i rows processed.\n" % (rows_affected)
# =======================================
print '<form method="GET">'
print '<div style="width: 800px; margin:0 auto;">'
print '<br>'
print 'Query:<br>'
print '<textarea id="QueryField" name="Query" cols="40" rows="5" style="width: 800px;">%s</textarea>' % (queryStr)
print '<br>'
print '<input type="submit" value="Submit"> '
print '<input type="button" onClick="clearQueryField()" value="Clear">'
print ' Queries: <input type="button" onClick="enterSelect()" value="SELECT">'
print ' <input type="button" onClick="enterUpdate()" value="UPDATE">'
print ' <input type="button" onClick="enterInsert()" value="INSERT">'
print ' <input type="button" onClick="enterDelete()" value="DELETE">'
print ' <input type="button" onClick="enterDescribe()" value="DESCRIBE">'
print '<br>'
print '<hr>'
print '</form>'
print 'Response:<br>'
print '<textarea readonly id="Response" cols="40" rows="40" style="width: 800px;">%s</textarea>' % (responseStr)
print '<div>'
print '</body></html>'
# =====================================
print "Content-type:text/html"
print # THIS BLANK LINE IS MANDATORY
print '<!DOCTYPE html>'
print '<html lang="en">'
print '<head>'
print '<script language="JavaScript">'
print 'function clearQueryField() {'
print ' document.getElementById("QueryField").value="";'
print '}'
print 'function enterSelect() {'
print ' document.getElementById("QueryField").value="SELECT * FROM table_name WHERE condition;";'
print '}'
print 'function enterUpdate() {'
print ' document.getElementById("QueryField").value="UPDATE table_name SET field=value WHERE condition;";'
print '}'
print 'function enterInsert() {'
print ' document.getElementById("QueryField").value="INSERT INTO table_name VALUES (value1,value2);";'
print '}'
print 'function enterDelete() {'
print ' document.getElementById("QueryField").value="DELETE FROM table_name WHERE condition;";'
print '}'
print 'function enterDescribe() {'
print ' document.getElementById("QueryField").value="DESCRIBE table_name;";'
print '}'
print '</script></head><body>'
# Determine logged user from cookie, if any
logged_user = authenticateUser()
### HACK!! USER AUTHENTICATION BYPASSED
print "<h3>WARNING: user authentication temporarily overridden! Don\'t forget to re-protect this page!</h3>"
showGoodPage()
#if logged_user != None and logged_user.Username == "Meithan":
# showGoodPage()
#else:
# showBadLogin()
|
gpl-3.0
| -4,550,998,764,767,151,000
| 31.760563
| 118
| 0.57622
| false
| 4.044348
| false
| false
| false
|
traxex33/Twitter-Analysis
|
junk/mineTweets.py
|
1
|
2336
|
import tweepy
import json
from tweepy import OAuthHandler
from tweepy import Stream
from liveListener import Listener
class TweetMiner:
def __init__(self, config_fname='config'):
self._readdetails(config_fname)
self._authenticate()
def mine(self):
self.state = None
        while self.state != '1' and self.state != '2':
print ("Press 1 to calculate popularity of a phrase. Press 2 to analyze a user profile.")
self.state = str(raw_input())
if self.state == '1' or self.state == '2':
break
print ("Enter a valid choice")
# Call functions
if self.state == '1':
return self.state, self.trackLiveTweets()
elif self.state == '2':
return self.state, self.getUserTweets()
# Tracking live tweets for popularity calculation
def trackLiveTweets(self):
print ("Enter a key word to track for 5 minutes. Be as specific as possible")
self.file = 'tweets.json'
self.trackWord = str(raw_input())
self.twitter_stream = Stream(self.auth, Listener(self.file))
self.twitter_stream.filter(track=[self.trackWord])
return self.file
# Getting tweets from user profile for analysis
def getUserTweets(self):
print ("Enter the user <screen_name> to track. For example, '@user' without the quotes.")
self.screenName = str(raw_input())
self.file = self.screenName + "_tweets.json"
open(self.file, 'w').close()
for status in tweepy.Cursor(self.api.user_timeline, screen_name=self.screenName).items(200):
with open(self.file, "a") as f:
json.dump(dict(status._json), f)
f.write('\n')
return self.file
def _readdetails(self, config_fname):
with open(config_fname, 'r') as f:
self.consumer_key = f.readline().replace('\n', '')
self.consumer_secret = f.readline().replace('\n', '')
self.access_token = f.readline().replace('\n', '')
self.access_secret = f.readline().replace('\n', '')
def _authenticate(self):
self.auth = OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth.set_access_token(self.access_token, self.access_secret)
self.api = tweepy.API(self.auth)
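# Hedged usage sketch (assumes a plain-text 'config' file whose four lines hold
# the consumer key, consumer secret, access token and access secret, as
# _readdetails expects):
#
# miner = TweetMiner('config')
# state, json_file = miner.mine()  # '1' -> live tracking, '2' -> user timeline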
|
mit
| -6,225,401,496,223,295,000
| 39.275862
| 101
| 0.606592
| false
| 3.80456
| false
| false
| false
|
henriquegemignani/randovania
|
randovania/games/prime/patcher_file_lib/hint_formatters.py
|
1
|
3971
|
import typing
from randovania.game_description import node_search
from randovania.game_description.area import Area
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.hint import Hint, HintLocationPrecision, RelativeDataArea, HintRelativeAreaName
from randovania.game_description.node import PickupNode
from randovania.game_description.resources.pickup_index import PickupIndex
from randovania.game_description.world_list import WorldList
from randovania.games.prime.patcher_file_lib import hint_lib
class LocationFormatter:
def format(self, determiner: hint_lib.Determiner, pickup_name: str, hint: Hint) -> str:
raise NotImplementedError()
class GuardianFormatter(LocationFormatter):
_GUARDIAN_NAMES = {
PickupIndex(43): "Amorbis",
PickupIndex(79): "Chykka",
PickupIndex(115): "Quadraxis",
}
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
guardian = hint_lib.color_text(hint_lib.TextColor.GUARDIAN, self._GUARDIAN_NAMES[hint.target])
return f"{guardian} is guarding {determiner}{pickup}."
class TemplatedFormatter(LocationFormatter):
def __init__(self, template: str, area_namer: hint_lib.AreaNamer):
self.template = template
self.hint_name_creator = area_namer
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
node_name = self.hint_name_creator.location_name(
hint.target,
hint.precision.location == HintLocationPrecision.WORLD_ONLY
)
return self.template.format(determiner=determiner,
pickup=pickup,
node=node_name)
class RelativeFormatter(LocationFormatter):
def __init__(self, world_list: WorldList, patches: GamePatches):
self.world_list = world_list
self.patches = patches
self._index_to_node = {
node.pickup_index: node
for node in world_list.all_nodes
if isinstance(node, PickupNode)
}
def _calculate_distance(self, source_location: PickupIndex, target: Area) -> int:
source = self._index_to_node[source_location]
return node_search.distances_to_node(self.world_list, source,
patches=self.patches, ignore_elevators=False)[target]
def relative_format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint, other_area: Area, other_name: str,
) -> str:
distance = self._calculate_distance(hint.target, other_area) + (hint.precision.relative.distance_offset or 0)
if distance == 1:
distance_msg = "one room"
else:
precise_msg = "exactly " if hint.precision.relative.distance_offset is None else "up to "
distance_msg = f"{precise_msg}{distance} rooms"
return (f"{determiner.title}{pickup} can be found "
f"{hint_lib.color_text(hint_lib.TextColor.LOCATION, distance_msg)} away from {other_name}.")
def format(self, determiner: hint_lib.Determiner, pickup_name: str, hint: Hint) -> str:
raise NotImplementedError()
class RelativeAreaFormatter(RelativeFormatter):
def format(self, determiner: hint_lib.Determiner, pickup: str, hint: Hint) -> str:
relative = typing.cast(RelativeDataArea, hint.precision.relative)
other_area = self.world_list.area_by_area_location(relative.area_location)
if relative.precision == HintRelativeAreaName.NAME:
other_name = self.world_list.area_name(other_area)
elif relative.precision == HintRelativeAreaName.FEATURE:
raise NotImplementedError("HintRelativeAreaName.FEATURE not implemented")
else:
raise ValueError(f"Unknown precision: {relative.precision}")
return self.relative_format(determiner, pickup, hint, other_area, other_name)
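# Hedged usage sketch (not from the original source): TemplatedFormatter only
# substitutes the {determiner}, {pickup} and {node} placeholders, so a template
# such as the one below works with any hint_lib.AreaNamer instance (here a
# hypothetical `namer`):
#
# formatter = TemplatedFormatter("{determiner}{pickup} can be found in {node}.", namer)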
|
gpl-3.0
| -1,242,443,017,889,526,800
| 44.125
| 122
| 0.674893
| false
| 3.774715
| false
| false
| false
|
vlimant/IntelROCCS
|
Monitor/plotFromPickle.py
|
1
|
21846
|
#!/usr/bin/python
'''==============================================================================
This script reads information from the pickle caches and directly
makes plots
=============================================================================='''
import os, sys
import re, glob, subprocess, time
from findDatasetHistoryAll import *
import findDatasetProperties as fDP
import cPickle as pickle
import ROOT
from array import array
from Dataset import *
from operator import itemgetter
genesis=1378008000
nowish = time.time()
loadedStyle=False
rc=None
try:
monitorDB = os.environ['MONITOR_DB']
except KeyError:
    sys.stderr.write('\n ERROR - an environment variable is not defined\n\n')
sys.exit(2)
'''==============================================================================
H E L P E R S
=============================================================================='''
def addData(nAllAccessed,nAccessed,debug=0):
# adding a hash array (nAccessed) to the mother of all hash arrays (nAllAccessed)
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nAllAccessed:
nAllAccessed[key] += nAccessed[key]
else:
nAllAccessed[key] = nAccessed[key]
# return the updated all hash array
return nAllAccessed
def addSites(nSites,nAccessed,debug=0):
# adding up the number of sites for each dataset
# loop through the hash array
for key in nAccessed:
# add the entries to our all access hash array
if key in nSites:
nSites[key] += 1
else:
nSites[key] = 1
# return the updated all hash array
return nSites
def convertSizeToGb(sizeTxt):
# first make sure string has proper basic format
if len(sizeTxt) < 3:
print ' ERROR - string for sample size (%s) not compliant. EXIT.'%(sizeTxt)
sys.exit(1)
# this is the text including the size units, that need to be converted
sizeGb = float(sizeTxt[0:-2])
units = sizeTxt[-2:]
# decide what to do for the given unit
if units == 'MB':
sizeGb = sizeGb/1000.
elif units == 'GB':
pass
elif units == 'TB':
sizeGb = sizeGb*1000.
else:
print ' ERROR - Could not identify size. EXIT!'
sys.exit(1)
# return the size in GB as a float
return sizeGb
def calculateAverageNumberOfSites(sitePattern,datasetSet,fullStart,end,datasetPattern):
# calculate the average number of replicas (sites) for a dataset in a given time interval
# print ' Relevant time interval: %s %s --> %d'%(time.strftime("%Y-%m-%d",time.gmtime(fullStart))\
# ,time.strftime("%Y-%m-%d",time.gmtime(end)),end-fullStart)
print ' Relevant time interval: %s %s --> %d'%(fullStart,end,end-fullStart)
# convert it into floats and take care of possible rounding issues
fullInterval = end - fullStart
predictedDatasetsOnSites={}
for datasetName in datasetSet:
predictedDatasetsOnSites[datasetName]=set([])
nSites = {}
timeOnSites = {} #timeOnSites[dataset][site] = timef
# match the intervals from the phedex history to the requested time interval
#===========================================================================
for datasetName,datasetObject in datasetSet.iteritems():
verb = (datasetName=='/GluGluZH_HToWW_M120_13TeV_powheg_pythia8/RunIIFall15MiniAODv1-PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/MINIAODSIM')
if not re.match(datasetPattern,datasetName):
continue
# don't penalize a dataset for not existing
cTime = datasetObject.cTime
#start = max(fullStart,cTime)
start = fullStart
interval = end - start
if verb:
print fullStart,end,start,cTime,interval
if not datasetName in nSites:
nSites[datasetName] = 0
timeOnSites[datasetName] = {}
for siteName,movement in datasetObject.movement.iteritems():
if not re.match(sitePattern,siteName): # only requested sites
continue
if not siteName in timeOnSites[datasetName]:
timeOnSites[datasetName][siteName] = 0
xfers = movement[0]
dels = movement[1]
if verb:
print siteName
print '\t',xfers
print '\t',dels
lenXfer = len(xfers)
lenDel = len(dels)
if lenDel == lenXfer - 1:
dels.append(nowish)
# find this site's fraction for nSites
if not datasetName in nSites:
nSites[datasetName] = 0
siteSum = 0
i = 0
while i < lenXfer:
try:
tXfer = xfers[i]
tDel = dels[i]
except IndexError:
break
i = i + 1 # iterate before all continue statements
# four ways to be in interval
# (though this prevents you from having the same
# start and end date)
if tXfer <= start <= end <= tDel:
siteSum += 1 # should happen at most once (?)
elif tXfer <= start < tDel < end:
siteSum += float(tDel - start)/float(interval)
elif start < tXfer < end <= tDel:
siteSum += float(end - tXfer)/float(interval)
elif start < tXfer < tDel <= end:
siteSum += float(tDel - tXfer)/float(interval)
else: # have ensured tXfer > tDel
continue
if verb:
print '\t',siteSum
if siteSum>0:
timeOnSites[datasetName][siteName] += siteSum
nSites[datasetName] += siteSum
n = 0
nSkip = 0
Sum = float(0)
for datasetName in nSites:
if nSites[datasetName] == 0: # dataset not on sites in interval
nSkip += 1
continue
Sum += nSites[datasetName]
n += 1
return nSites,timeOnSites
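# Worked example of the prorating above (illustrative numbers only): with
# start=0 and end=100, a replica transferred at tXfer=25 and deleted at tDel=75
# falls in the "start < tXfer < tDel <= end" branch and contributes
# (75 - 25) / (100 - 0) = 0.5 to nSites for that dataset/site pair.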
def makeActualPlots(sitePattern,start,end,jarFile,crbLabel='',rootFile='',makeSummaryPlots=False):
if crbLabel!='' and rootFile=='':
sys.stderr.write('ERROR [plotFromPickle.makeActualPlots]: If crbLabel is defined, rootFile must be defined')
return
groupPattern = os.environ['MONITOR_GROUP']
datasetPattern = os.environ['MONITOR_PATTERN']
datasetPattern = datasetPattern.replace("_",".*")
groupPattern = groupPattern.replace("_",".*")
interval = float(end - start)/float(86400*30) # time interval in months, used for plotting
sitePattern=re.sub("\*",".*",sitePattern) # to deal with stuff like T2* --> T2.*
print "\n = = = = S T A R T A N A L Y S I S = = = =\n"
print " Dataset pattern: %s"%(datasetPattern)
print " Group pattern: %s"%(groupPattern)
print " Site pattern: %s"%(sitePattern)
pickleJar = None
if type(jarFile)==type(''):
pickleJar = open(jarFile,"rb")
pickleDict = pickle.load(pickleJar)
else:
pickleDict = jarFile
datasetSet = pickleDict["datasetSet"]
nSiteAccess = pickleDict["nSiteAccess"]
# last step: produce plots!
global loadedStyle,rc
if not loadedStyle:
stylePath = os.environ.get("MIT_ROOT_STYLE")
print stylePath
rc = ROOT.gROOT.LoadMacro(stylePath) # figure out using so later
if rc:
print "Warning, MitRootStyle could not be loaded from %s"%(stylePath)
else:
ROOT.MitRootStyle.Init()
loadedStyle=True
print 'MAKING SUMMARY:',makeSummaryPlots
if makeSummaryPlots:
c11 = ROOT.TCanvas("c11","c11",800,800)
nTiers=7
hVolumeFrac = ROOT.TH1F("hVolumeFrac","hVolumeFrac",nTiers,-0.5,nTiers-.5)
hUsageFrac = ROOT.TH1F("hUsageFrac","hUsageFrac",nTiers,-0.5,nTiers-.5)
tiers = {'AODSIM':0, 'AOD':1, 'MINIAODSIM':2,'MINIAOD':3,'GEN-SIM-RAW':4,'GEN-SIM-RECO':5,'Other':6}
for hist in [hUsageFrac,hVolumeFrac]:
xaxis = hist.GetXaxis()
for tier,nBin in tiers.iteritems():
xaxis.SetBinLabel(nBin+1,tier)
totalVolume=0
totalUsage=0
siteAccessDict = {}
miniaodSizeNoRepl=0
miniaodSizeRepl=0
for datasetName,datasetObject in datasetSet.iteritems():
tier = datasetName.split('/')[-1]
datasetVolume = max(0,len(datasetObject.currentSites)*datasetObject.sizeGB)
if tier.find('MINIAOD')>=0 and len(datasetObject.currentSites)>0:
# print datasetName,datasetObject.sizeGB
miniaodSizeNoRepl += datasetObject.sizeGB
miniaodSizeRepl += datasetVolume
datasetUsage = 0
for s,a in datasetObject.nAccesses.iteritems():
if not re.match(sitePattern,s):
continue
if s not in siteAccessDict:
siteAccessDict[s] = [0,0]
for t,n in a.iteritems():
if (nowish-t)<(86400*30):
datasetUsage+=n
totalVolume += datasetVolume
totalUsage += datasetUsage
if tier not in tiers:
tier = 'Other'
if tier in tiers:
val = tiers[tier]
hVolumeFrac.Fill(val,datasetVolume)
hUsageFrac.Fill(val,datasetUsage)
hVolumeFrac.Scale(1./totalVolume)
hUsageFrac.Scale(1./totalUsage)
for hist in [hUsageFrac,hVolumeFrac]:
ROOT.MitRootStyle.InitHist(hist,"","",1)
hVolumeFrac.GetYaxis().SetTitle('current volume fraction')
hUsageFrac.GetYaxis().SetTitle('usage fraction (30 days)')
# hUsageFrac.SetNormFactor()
for hist in [hUsageFrac,hVolumeFrac]:
hist.SetFillColor(8)
hist.SetLineColor(8)
hist.SetFillStyle(1001)
hist.SetMinimum(0.)
hist.SetTitle('')
c11.Clear()
c11.cd()
c11.SetBottomMargin(.2)
c11.SetRightMargin(.2)
hist.Draw("hist")
if hist==hVolumeFrac:
c11.SaveAs(monitorDB+'/FractionVolume_%s.png'%(groupPattern))
else:
c11.SaveAs(monitorDB+'/FractionUsage_%s.png'%(groupPattern))
print "no replication ",miniaodSizeNoRepl
print "with replication",miniaodSizeRepl
c21 = ROOT.TCanvas("c21","c21",1000,600)
for h in [hVolumeFrac,hUsageFrac]:
h.Delete()
return
print "Computing average number of sites"
nAverageSites,timeOnSites = calculateAverageNumberOfSites(sitePattern,datasetSet,start,end,datasetPattern)
'''==============================================================================
our usage plots
=============================================================================='''
cUsage = ROOT.TCanvas("c1","c1",800,800)
maxBin = 8
nBins = 60.
l = []
low = 0
i = 0
while low < maxBin:
l.append(low)
low += (maxBin/nBins) * (1.1)**(i)
i += 1
l.append(maxBin)
hUsage = ROOT.TH1F("dataUsage","Data Usage",len(l)-1,array('f',l))
# delta = float(maxBin)/(2*(nBins-1)) # so that bins are centered
# hUsage = ROOT.TH1F("dataUsage","Data Usage",nBins,-delta,maxBin+delta)
kBlack = 1
if not rc:
ROOT.MitRootStyle.InitHist(hUsage,"","",kBlack)
titles = "; Accesses/month; Fraction of total data volume"
hUsage.SetTitle(titles)
meanVal = 0.
sumWeight = 0.
nEntries = 0
totalSize = 0
for datasetName,datasetObject in datasetSet.iteritems():
if not re.match(datasetPattern,datasetName):
# print "did not match pattern"
continue
if datasetObject.nFiles==0:
# what
continue
nSitesAv = nAverageSites[datasetName]
if nSitesAv == 0:
# it was nowhere
continue
nAccess = 0
for siteName in datasetObject.nAccesses:
if not re.match(sitePattern,siteName):
# maybe we should get rid of this functionality to speed things up
continue
for utime,n in datasetObject.nAccesses[siteName].iteritems():
if utime >= start and utime <= end:
nAccess += n
value = float(nAccess)/float(datasetObject.nFiles*nSitesAv*interval)
weight = float(nSitesAv) * float(datasetObject.sizeGB)/1000.
totalSize += weight
meanVal += value
sumWeight += weight
nEntries += 1
hUsage.Fill(min(maxBin,value),weight)
#totalSize = hUsage.Integral()
    print "Found %i datasets, corresponding to an average volume of %.3f PB"%(nEntries,float(totalSize)/1000.)
if (sumWeight==0):
sumWeight=1;
totalSize=1;
meanVal = meanVal/sumWeight
hUsage.Scale(1./float(totalSize))
maxy = hUsage.GetMaximum()
hUsage.SetMaximum(maxy*10.)
ROOT.gPad.SetLogy(1) # big zero bins
cUsage.cd()
try:
histColor = os.environ['MONITOR_COLOR']
hUsage.SetLineColor(histColor)
except KeyError:
pass
hUsage.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
ROOT.MitRootStyle.AddText("Overflow added to last bin.")
if groupPattern == ".*":
groupPattern = "All"
integralTexts = []
integralTexts.append( "Group: %s"%(groupPattern) )
integralTexts.append( "Period: [%s, %s]\n"%( strftime("%Y-%m-%d",gmtime(start)) , strftime("%Y-%m-%d",gmtime(end)) ) )
integralTexts.append( "Average data managed: %.3f PB\n"%(totalSize/1000.) )
# integralTexts.append( "Mean: %.3f accesses/month\n"%( meanVal ) )
integralTexts.append( "Mean: %.3f accesses/month\n"%(hUsage.GetMean()) )
positions = [0.85,0.8, 0.75, 0.7]
plotTText = [None,None,None,None]
for i in range(4):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cUsage.SaveAs(monitorDB+"/Usage_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cUsage.SaveAs(monitorDB+"/Usage_%s_%i_%i.png"%(groupPattern,start,end))
# houtFile = ROOT.TFile(monitorDB+"/outfile.root","UPDATE")
# houtFile.cd()
# hUsage.Write("%s_%s"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
# houtFile.Close()
'''==============================================================================
CRB usage plots
=============================================================================='''
ROOT.gPad.SetLogy(0)
hCRB = ROOT.TH1F("CRBUsage","Data Usage",17,-1.5,15.5)
hZeroOne = ROOT.TH1F("CRBZeroOne","Zero and One Bin",100,0,1.);
hTime = ROOT.TH1F("time","time",100,-0.1,1.1);
if not rc:
ROOT.MitRootStyle.InitHist(hCRB,"","",kBlack)
ROOT.MitRootStyle.InitHist(hZeroOne,"","",kBlack)
ROOT.MitRootStyle.InitHist(hTime,"","",kBlack)
titles = "; <n_{accesses}>; Prorated data volume [TB]"
hCRB.SetTitle(titles)
hZeroOne.SetTitle(titles)
titles = "; Prorated Time Fraction; Data volume [TB]"
hTime.SetTitle(titles)
cCRB = ROOT.TCanvas("c2","c2",800,800)
cZeroOne = ROOT.TCanvas("c3","c3",800,800)
cTime = ROOT.TCanvas("c4","c4",800,800)
for datasetName,datasetObject in datasetSet.iteritems():
if datasetObject.cTime>end:
continue
if not re.match(datasetPattern,datasetName):
continue
if datasetObject.nFiles==0:
# what
continue
sizeGB = datasetObject.sizeGB
for siteName in datasetObject.movement:
if not re.match(sitePattern,siteName):
continue
timeOnSite = timeOnSites[datasetName][siteName]
# print timeOnSite
value = 0
if siteName in datasetObject.nAccesses:
for utime,n in datasetObject.nAccesses[siteName].iteritems():
if utime <= end and utime >= start:
value += float(n)/datasetObject.nFiles
fillValue = min(max(1,value), 14.5)
# if value < 1:
# print value,fillValue
if value == 0:
if datasetObject.cTime > start:
fillValue = 0
else:
fillValue = -1
weight = float(sizeGB * timeOnSite)/1000.
# print datasetObject
# print fillValue,weight
# sys.exit(-1)
hCRB.Fill(fillValue,weight)
if (fillValue == 0) or (fillValue == 1):
hZeroOne.Fill(value,weight)
hTime.Fill(timeOnSite,sizeGB/1000.)
try:
histColor = os.environ['MONITOR_COLOR']
hCRB.SetLineColor(histColor)
hTime.SetLineColor(histColor)
hZeroOne.SetLineColor(histColor)
except KeyError:
pass
if crbLabel!='':
print 'Updating',rootFile
fSave = ROOT.TFile(rootFile,'UPDATE')
histName = 'h_'+os.environ['MONITOR_PLOTTEXT']
fSave.WriteTObject(hCRB,histName,"Overwrite")
# fSave.WriteTObject(hTime,histName+'_time','Overwrite')
fSave.Close()
xaxis = hCRB.GetXaxis()
xaxis.SetBinLabel(1,"0 old")
xaxis.SetBinLabel(2,"0 new")
xaxis.SetBinLabel(3,"< 1")
xaxis.SetBinLabel(4,"2")
xaxis.SetBinLabel(5,"3")
xaxis.SetBinLabel(6,"4")
xaxis.SetBinLabel(7,"5")
xaxis.SetBinLabel(8,"6")
xaxis.SetBinLabel(9,"7")
xaxis.SetBinLabel(10,"8")
xaxis.SetBinLabel(11,"9")
xaxis.SetBinLabel(12,"10")
xaxis.SetBinLabel(13,"11")
xaxis.SetBinLabel(14,"12")
xaxis.SetBinLabel(15,"13")
xaxis.SetBinLabel(16,"14")
xaxis.SetBinLabel(17,">14")
cCRB.cd()
hCRB.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
ROOT.MitRootStyle.AddText("Overflow in last bin.")
totalSize = hCRB.Integral()
integralTexts = ["Period: [%s, %s]\n"%( strftime("%Y-%m-%d",gmtime(start)) , strftime("%Y-%m-%d",gmtime(end)) )]
integralTexts.append( "Average data on disk: %.3f PB\n"%(totalSize/1000.) )
positions = [0.8,0.75]
plotTText = [None,None]
for i in range(2):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
if groupPattern == ".*":
groupPattern = "All"
try:
hCRB.SaveAs(monitorDB+"/CRBUsage_%s_%s.C"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
cCRB.SaveAs(monitorDB+"/CRBUsage_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cCRB.SaveAs(monitorDB+"/CRBUsage_%s_%i_%i.png"%(groupPattern,start,end))
cZeroOne.cd()
hZeroOne.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
plotTText = [None,None]
for i in range(1):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cZeroOne.SaveAs(monitorDB+"/CRB01_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cZeroOne.SaveAs(monitorDB+"/CRB01_%s_%i_%i.png"%(groupPattern,start,end))
cTime.cd()
hTime.Draw("hist")
ROOT.MitRootStyle.OverlayFrame()
plotTText = [None,None]
for i in range(1):
plotTText[i] = ROOT.TText(.3,positions[i],integralTexts[i])
plotTText[i].SetTextSize(0.04)
plotTText[i].SetTextColor(2)
plotTText[i].Draw()
try:
cTime.SaveAs(monitorDB+"/CRBTime_%s_%s.png"%(groupPattern,os.environ['MONITOR_PLOTTEXT']))
except KeyError:
cTime.SaveAs(monitorDB+"/CRBTime_%s_%i_%i.png"%(groupPattern,start,end))
if pickleJar:
pickleJar.close()
for h in [hUsage,hCRB,hZeroOne,hTime]:
h.Delete()
if __name__ == '__main__':
'''==============================================================================
M A I N
=============================================================================='''
debug = 0
sizeAnalysis = True
addNotAccessedDatasets = True
usage = "\n"
usage += " plottingWrapper.py <sitePattern> <startDate> <endDate> <pickleJar>\n"
usage += "\n"
usage += " sitePattern - pattern to select particular sites (ex. T2* or T2_U[SK]* etc.)\n"
usage += " startDate - epoch time of starting date\n"
usage += " endDate - epoch time of ending date\n"
usage += " pickleJar - *.pkl file containing the relevant aggregated data\n"
# decode command line parameters
crbLabel = ''
rootFile = ''
if len(sys.argv)>=5:
sitePattern = str(sys.argv[1])
start = max(genesis,int(sys.argv[2]))
end = min(nowish,int(sys.argv[3]))
jarFileName = str(sys.argv[4])
if len(sys.argv)>=6:
crbLabel = str(sys.argv[5])
rootFile = str(sys.argv[6])
makeSummaryPlots = False
elif len(sys.argv)==3:
sitePattern = sys.argv[1]
start = genesis
end = nowish
jarFileName = sys.argv[2]
makeSummaryPlots = True
else:
sys.stderr.write(' ERROR - wrong number of arguments\n')
sys.stderr.write(usage)
sys.exit(2)
makeActualPlots(sitePattern,start,end,jarFileName,crbLabel,rootFile,makeSummaryPlots)
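# Example invocations (the epoch times and pickle path are placeholders; the
# argument order mirrors the usage text above):
#   ./plotFromPickle.py "T2*" 1378008000 1420070400 monitor.pkl
#   ./plotFromPickle.py "T2*" monitor.pkl      # two arguments -> summary plots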
|
mit
| 1,943,402,565,564,367,600
| 36.927083
| 151
| 0.564039
| false
| 3.627698
| false
| false
| false
|
relekang/accio
|
accio/webhooks/tests.py
|
1
|
1727
|
import pytest
@pytest.fixture
def push_based_project(accio_project):
accio_project.deploy_on = 'push'
accio_project.save()
return accio_project
@pytest.fixture
def status_based_project(accio_project):
accio_project.deploy_on = 'status'
accio_project.save()
return accio_project
@pytest.mark.django_db
def test_github_push_should_deploy(mock_runners, push_based_project, github_webhooks):
response = github_webhooks(name='push', event='push')
assert response.content.decode() == 'Deploy queued'
assert response.status_code == 200
@pytest.mark.django_db
def test_github_push_should_not_deploy_other_branch(push_based_project, github_webhooks):
response = github_webhooks(name='push_other_branch', event='push')
assert response.content.decode() == 'Not on master branch'
assert response.status_code == 400
@pytest.mark.django_db
def test_github_status_success_should_deploy(mock_runners, status_based_project, github_webhooks):
response = github_webhooks(name='status_success', event='status')
assert response.content.decode() == 'Deploy queued'
assert response.status_code == 200
@pytest.mark.django_db
def test_github_status_failure_should_not_deploy(status_based_project, github_webhooks):
response = github_webhooks(name='status_failure', event='status')
assert response.content.decode() == 'Status is not success'
assert response.status_code == 400
@pytest.mark.django_db
def test_github_status_not_master_should_not_deploy(status_based_project, github_webhooks):
response = github_webhooks(name='status_not_master', event='status')
assert response.content.decode() == 'Not on master branch'
assert response.status_code == 400
|
mit
| -6,829,933,572,956,118,000
| 33.54
| 98
| 0.7348
| false
| 3.495951
| true
| false
| false
|
opensource-expert/customers-formula
|
customers/customers_passwords.py
|
1
|
4681
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: set ft=python:
#
# python password generator for customers
#
# Depend: pwqgen
#
# Usage:
# ./customers_passwords.py customers_top pillarpath/to/customers.sls pillarpath/user_passwords.yaml
#
# Output: Int, the number of created passwords in pillarpath/user_passwords.sls
#
# fileformat:
# /!\ input files are PURE yaml file format, no jinja
#
# customers.sls:
# customers_top:
# customers:
# client1: <-- key used for password match as username
# […]
# client2:
# client3:
#
# user_passwords.yaml: will be overwritten if any, don't put anything non-yaml
# client1:
# mysql: bla
# shell: piou
# websmtp: somepass_for_controling_email_from_the_web
# hash: $1$17391272$rgWtYpRIDVUrT202c89Fp1
# client2:
# mysql: somepassword
# shell: shelllpassword
# websmtp: my_web_pass_for_mail
# hash: $1$17391272$rgWtYpRIDVUrT202c89Fp1
# # one entry per customer name…
# unittest: See ../tests/test_customers_passwords.py
from __future__ import absolute_import
import subprocess
import sys
import yaml
import random
from collections import OrderedDict
def random_pass():
res = subprocess.check_output(["pwqgen"]).rstrip()
return res
def unix_pass(password):
saltpw = str(random.randint(2**10, 2**32))
args = ['openssl', 'passwd', '-1', '-salt', saltpw, password]
res = subprocess.check_output(args).rstrip()
return res
def read_yaml(filename):
f = open(filename)
data = yaml.safe_load(f)
f.close()
return data
def create_all_pass():
"""
    Return an OrderedDict of all passwords:
new_pass['mysql'] = random_pass()
new_pass['shell'] = shell_pass
new_pass['hash'] = unix_pass(shell_pass)
"""
shell_pass = random_pass()
new_pass = OrderedDict()
new_pass['mysql'] = random_pass()
new_pass['shell'] = shell_pass
new_pass['websmtp'] = random_pass()
new_pass['hash'] = unix_pass(shell_pass)
return new_pass
def write_password_db_yaml(fname, passDB):
"""
    Write the ordered password db in a YAML-compatible way.
"""
f = open(fname, 'w')
for u, passwd in passDB.items():
f.write("%s:\n" % u)
for k in passwd.keys():
f.write(" %s: %s\n" % (k, passwd[k]))
# this outputer as some difficulties with OrderedDict
# f.write(yaml.dump(passDB, default_flow_style=False))
f.close()
def update_missing_fields(passDB, force_hash=False):
"""
    Check for missing fields (e.g. when new fields have been added):
    loop over all fields and fill in any that are empty.
    If force_hash is True, recompute the hashes.
    Return the number of updates performed.
"""
    # fetch the field names (the passwords generated here are discarded)
fields = create_all_pass().keys()
n = 0
for u, passwd in passDB.items():
        # check for newly added, possibly missing fields
for p in fields:
            # read this password
myp = passwd.get(p)
if (myp == None or myp == '') or (force_hash and p == 'hash'):
if p == 'hash':
hashed = unix_pass(passDB[u]['shell'])
passDB[u]['hash'] = hashed
elif p == 'shell':
# reset hash, will be computed in next loop
passDB[u]['hash'] = None
passDB[u][p] = random_pass()
else:
passDB[u][p] = random_pass()
# we have modified some entries
n += 1
return n
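# Illustrative note (not in the original source): calling
# update_missing_fields(passDB, force_hash=True) leaves mysql/shell/websmtp
# entries that are already set untouched, but recomputes every 'hash' field
# from the stored 'shell' password via unix_pass(), bumping the counter once
# per field it touches.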
def main(customers_top, user_db, password_db):
userDB = read_yaml(user_db)
    # we can handle a non-existent password file
try:
passDB = read_yaml(password_db)
except IOError as e:
passDB = {}
# hardcoded path to access data for customers
mysql_users = userDB[customers_top]['customers'].keys()
# keys names matching username are top level
if passDB:
user_with_pass = passDB.keys()
else:
# empty
user_with_pass = []
passDB = {}
missing_password = set(mysql_users) - set(user_with_pass)
n = 0
# add missing passwords
for u in missing_password:
passDB[u] = create_all_pass()
n += 1
    # update if some new fields have been added
n += update_missing_fields(passDB)
# write back modified yaml
if n > 0:
write_password_db_yaml(password_db, passDB)
# return number of new created password entries
return n
if __name__ == '__main__':
customers_top = sys.argv[1]
user_db = sys.argv[2]
password_db = sys.argv[3]
print(main(customers_top, user_db, password_db))
|
gpl-3.0
| -1,138,239,243,298,611,100
| 26.511765
| 100
| 0.598888
| false
| 3.386676
| false
| false
| false
|
rickypc/dotfiles
|
.rflint.d/table.py
|
1
|
3998
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Robot Lint Rules - Lint rules for Robot Framework data files.
# Copyright (c) 2014, 2015, 2016 Richard Huang <rickypc@users.noreply.github.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Robot Lint Rules - Lint rules for Robot Framework data files.
"""
from rflint.common import GeneralRule, WARNING
from rflint.parser import KeywordTable, Row, TestcaseTable
def _get_count(rows, has_steps=False):
"""Returns total breaklines."""
count = 0
rows = list(rows)
rows.reverse()
for row in rows:
if has_steps:
count += _get_count(row.steps)
if count > 0:
break
else:
line = row.cells if isinstance(row, Row) else row
if _is_breakline(line):
count += 1
else:
break
return count
def _get_rows(table):
"""Returns rows and step indicator."""
response = {
'has_steps': False,
'rows': [],
}
if isinstance(table, KeywordTable):
response['has_steps'] = True
response['rows'] = table.keywords
elif isinstance(table, TestcaseTable):
response['has_steps'] = True
response['rows'] = table.testcases
else:
response['rows'] = table.rows
return response
def _get_total(rows, has_steps=False):
"""Returns total rows and steps if applicable."""
total = len(rows)
if has_steps:
total += sum([len(row.statements) for row in rows])
return total
def _is_breakline(statement):
"""Returns True if statement is a breakline, False otherwise."""
return len(statement) == 1 and statement[0].strip() == ''
class TooFewTableBlankLines(GeneralRule):
"""Warn about tables without blank lines between each other.
"""
max_allowed = 1
message = 'Too few trailing blank lines in "%s" table.'
severity = WARNING
def apply(self, robot_file):
"""Apply the rule to given robot file."""
for table in robot_file.tables[:-1]:
response = _get_rows(table)
count = _get_count(**response)
total = _get_total(**response)
if count < self.max_allowed:
linenumber = table.linenumber + total
self.report(robot_file, self.message % table.name,
linenumber + self.max_allowed, 0)
def configure(self, max_allowed):
"""Configures the rule."""
self.max_allowed = int(max_allowed)
class TooManyTableBlankLines(GeneralRule):
"""Warn about tables with extra blank lines between each other.
"""
max_allowed = 1
message = 'Too many trailing blank lines in "%s" table.'
severity = WARNING
def apply(self, robot_file):
"""Apply the rule to given robot file."""
for table in robot_file.tables[:-1]:
response = _get_rows(table)
count = _get_count(**response)
total = _get_total(**response)
if count > self.max_allowed:
linenumber = (table.linenumber + total) - count
self.report(robot_file, self.message % table.name,
linenumber + self.max_allowed, 0)
def configure(self, max_allowed):
"""Configures the rule."""
self.max_allowed = int(max_allowed)
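# Illustrative note (assumption, not from the original source): with the default
# max_allowed of 1, a table followed by three blank lines before the next table
# triggers TooManyTableBlankLines, while a table with no trailing blank line at
# all triggers TooFewTableBlankLines; both report the offending table's name and
# an adjusted line number.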
|
mit
| -4,098,794,181,597,314,600
| 32.316667
| 84
| 0.612806
| false
| 4.034309
| false
| false
| false
|
IntelLabs/hpat
|
docs/source/buildscripts/user_guide_gen.py
|
1
|
12520
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from module_info import get_function, get_method_attr, get_function_doc, get_function_short_description
from module_info import create_header_str
from pandas_info import get_pandas_modules, init_pandas_logging
from sdc_info import get_sdc_modules, init_sdc_logging
from texttable import Texttable
import os
PANDAS_API_STR = 'Pandas API: ' # This substring prepends Pandas API name in the documentation
APIREF_RELPATH = r'./_api_ref/' # Relative path to API Reference folder
RST_MODULES = {
'api_reference.rst': ['pandas'],
'io.rst': ['pandas.io.api', 'pandas.io.clipboards', 'pandas.io.common', 'pandas.io.excel',
'pandas.io.feather_format', 'pandas.io.formats.console', 'pandas.io.formats.format',
'pandas.io.formats.printing', 'pandas.io.gbq', 'pandas.io.html', 'pandas.io.json',
'pandas.io.msgpack', 'pandas.io.msgpack.exceptions', 'pandas.io.packers', 'pandas.io.parquet',
'pandas.io.parsers', 'pandas.io.pickle', 'pandas.io.pytables', 'pandas.io.sas',
'pandas.io.sas.sasreader', 'pandas.io.spss', 'pandas.io.sql', 'pandas.io.stata'],
'series.rst': ['pandas.Series'],
'dataframe.rst': ['pandas.DataFrame'],
    'general_functions.rst': [],
}
pandas_modules = [] # List of Pandas submodules along with its functions and classes
sdc_modules = [] # List of Intel SDC submodules along with its functions and classes
def generate_module_doc(the_module):
module_doc = None
module_name = the_module['module_name']
# First, look up if there is RST file documenting particular module
for rst in RST_MODULES:
for mod in RST_MODULES[rst]:
if mod == module_name:
return module_doc # If there is a documentation for a given module then just return
# If there is no RST file then we create the documentation based on module's docstring
module_obj = the_module['module_object']
module_description = get_function_short_description(module_obj).strip()
if module_description is None:
module_description = ''
module_doc = module_description + '\n\nFor details please refer to Pandas API Reference for :py:mod:`' + \
module_name + '`\n\n'
return module_doc
def generate_api_index_for_module(the_module):
module_description = generate_module_doc(the_module)
if module_description is None:
module_description = ''
module_doc = ''
module_header_flag = False
# Document functions first, if any
tab = Texttable()
for func in the_module['functions']: # Iterate through the module functions
name = func['function_name']
obj = getattr(the_module['module_object'], name) # Retrieve the function object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
module_name = ''
func_doc = tab.draw()
if func_doc and func_doc != '': # If the function list is not empty then add module name to the document
module_name = the_module['module_name']
module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
create_header_str('Functions:', '-') + \
'\n\n' + func_doc + '\n\n'
module_header_flag = True
# Document classes
classes_header_flag = False
for the_class in the_module['classes']: # Iterate through the module classes
tab.reset()
class_name = the_class['class_name']
class_obj = the_class['class_object']
class_description = class_obj.__doc__
if not class_description:
class_description = ''
class_doc = ''
class_header_flag = False
# Document class attributes first, if any
for attr in the_class['class_attributes']: # Iterate through the class attributes
name = attr
obj = getattr(the_class['class_object'], name) # Retrieve the attribute object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
attr_doc = tab.draw()
if attr_doc and attr_doc != '': # If the attribute list is not empty then add class name to the document
class_header_flag = True
class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
create_header_str('Attributes:', '+') + \
'\n\n' + attr_doc + '\n\n'
# Document class methods, if any
for method in the_class['class_methods']: # Iterate through the class methods
name = method
obj = getattr(the_class['class_object'], name) # Retrieve the method object
description = get_function_short_description(obj).strip()
tab.add_rows([[name, description]], header=False)
method_doc = tab.draw()
if method_doc and method_doc != '': # If the method list is not empty then add class name to the document
if not class_header_flag:
class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \
create_header_str('Methods:', '+') + \
'\n\n' + method_doc + '\n\n'
class_header_flag = True
else:
class_doc += create_header_str('Methods:', '+') + \
'\n\n' + method_doc + '\n\n'
if not module_header_flag: # There is no module header yet
if class_header_flag: # There were methods/attributes for the class
module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \
create_header_str('Classes:', '-') + \
'\n\n' + class_doc + '\n\n'
module_header_flag = True
classes_header_flag = True
else: # The module header has been added
if class_header_flag: # There are new methods/attributes for the class
if not classes_header_flag: # First class of the module description
module_doc += create_header_str('Classes:', '-') + '\n\n'
module_doc += '\n\n' + class_doc + '\n\n'
return module_doc
def get_module_rst_fname(the_module):
file_name = the_module['module_name']
file_name = file_name.replace('.', '/')
file_name = APIREF_RELPATH + file_name + '.rst'
return file_name
def generate_api_index():
doc = '.. _apireference::\n\nAPI Reference\n*************\n\n' \
'.. toctree::\n :maxdepth: 1\n\n'
for the_module in pandas_modules: # Iterate through pandas_modules
module_doc = generate_api_index_for_module(the_module)
if len(module_doc) > 0:
file_name = get_module_rst_fname(the_module)
write_rst(file_name, module_doc)
doc += ' ' + file_name + '\n'
return doc
def generate_sdc_object_doc(sdc_func):
sdc_titled_sections = get_function_doc(sdc_func, True)
sdc_see_also_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'see also'), '')
sdc_limitations_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'limitations'), '')
sdc_examples_text = next((sec['text'] for sec in sdc_titled_sections
if sec['title'].lower().strip() == 'examples'), '')
# Get respective Pandas API name
pandas_name = sdc_titled_sections[0]['text'].strip()
pandas_name = pandas_name.replace(PANDAS_API_STR, '')
pandas_name = pandas_name.replace('\n', '')
# Find respective Pandas API
doc_object = get_method_attr(pandas_name, pandas_modules)
if not doc_object:
doc_object = get_function(pandas_name, pandas_modules)
if not doc_object:
        raise NameError('Pandas API: ' + pandas_name + ' does not exist')
# Extract Pandas API docstring as the list of sections
pandas_titled_sections = []
if doc_object:
pandas_titled_sections = get_function_doc(doc_object, False)
# Form final docstring which is a combination of Pandas docstring for the description, Parameters section,
# Raises section, Returns section. See Also, Limitations and Examples sections (if any) are taken from SDC docstring
short_description_section = pandas_titled_sections[0]['text'] + '\n\n'
pandas_titled_sections.pop(0)
long_description_section = ''
while pandas_titled_sections[0]['title'] == '':
long_description_section += pandas_titled_sections[0]['text'] + '\n\n'
pandas_titled_sections.pop(0)
raises_section = parameters_section = returns_section = see_also_section = \
limitations_section = examples_section = ''
for section in pandas_titled_sections:
title = section['title'].lower().strip()
if title == 'raises':
raises_section = 'Raises\n------\n\n' + section['text'] + '\n\n'
elif title == 'parameters':
parameters_section = 'Parameters\n----------\n\n' + section['text'] + '\n\n'
elif title == 'return' or title == 'returns':
returns_section = 'Returns\n-------\n\n' + section['text'] + '\n\n'
if sdc_see_also_text:
see_also_section = '\n.. seealso::\n\n' + sdc_see_also_text + '\n\n'
if sdc_limitations_text:
limitations_section = 'Limitations\n-----------\n\n' + sdc_limitations_text + '\n\n'
if sdc_examples_text:
examples_section = 'Examples\n-----------\n\n' + sdc_examples_text + '\n\n'
rst_label = pandas_name.replace('.', '_')
n = len(pandas_name)
docstring = \
'.. _' + rst_label + ':\n\n' + \
pandas_name + '\n' + '*'*n + '\n' + \
short_description_section + \
long_description_section + \
parameters_section + \
returns_section + \
raises_section + \
limitations_section + \
examples_section + \
see_also_section
file_name = rst_label + '.rst'
return file_name, docstring
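# Illustrative note (the API name is a hypothetical example): for an SDC
# docstring whose first section reads "Pandas API: pandas.Series.head", the code
# above looks up the matching Pandas object, merges the two docstrings and
# returns ('pandas_Series_head.rst', docstring) -- the dots of the API name
# become underscores in the RST label and file name.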
def write_rst(file_name, docstring):
directory = os.path.dirname(file_name)
if len(directory) > 0 and not os.path.exists(directory):
os.makedirs(directory)
file = open(file_name, 'w')
file.write(docstring)
file.close()
if __name__ == "__main__":
init_pandas_logging()
pandas_modules = get_pandas_modules()
init_sdc_logging()
sdc_modules = get_sdc_modules()
for the_module in sdc_modules:
if the_module['module_name'] == 'sdc.datatypes.hpat_pandas_series_functions':
for func in the_module['functions']:
file_name, doc = generate_sdc_object_doc(func['function_object'])
write_rst(APIREF_RELPATH + file_name, doc)
doc = generate_api_index()
write_rst('apireference.rst', doc)
|
bsd-2-clause
| 2,918,795,782,654,178,000
| 43.714286
| 120
| 0.610304
| false
| 3.822901
| false
| false
| false
|
jpruf/building-permits-geo
|
pipeline/tasks/convert_to_dict.py
|
1
|
1327
|
import os
import csv
import json
TSV_FOLDER = "../data/tsv/"
FIELDNAMES = ("tract", "apn", "issue_date", "final_date", "lot", "permit_number", "owner",
"contractor", "applicant", "location", "approval_status", "sub_code",
"sub_code_description", "work_code", "work_code_description", "census_code",
"permit_valuation", "reroof_valuation", "square_feet", "units", "rsn", "pool",
"sewer", "enterprise", "permit_flag")
def clean_and_annotate(row, label):
title = label.split('_')
row["year"] = title[1]
row["type"] = title[2]
return row
def convert_to_dicts(label):
with open(TSV_FOLDER + label + '.txt', 'rU') as tsv_input:
tsv_reader = csv.DictReader(tsv_input, fieldnames=FIELDNAMES, delimiter='\t')
# Skip the first line of the CSV file, which contains the headers
next(tsv_reader)
return [clean_and_annotate(row, label) for row in tsv_reader]
def run():
permits = {}
# Go through all of the files, and convert them into arrays of dicts
for file_name in os.listdir(TSV_FOLDER):
if file_name.endswith(".txt"):
            label = os.path.splitext(file_name)[0]  # drop the '.txt' suffix to get the label
permits_for_file = convert_to_dicts(label)
permits[label] = permits_for_file
return permits
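# Hedged usage sketch: file names are assumed to follow a
# "<prefix>_<year>_<type>.txt" pattern (the label below is made up), which is
# what clean_and_annotate() relies on when it splits the label on '_'.
#
#   permits = run()
#   rows = permits.get('permits_2014_residential', [])
#   # each row then carries row['year'] == '2014' and row['type'] == 'residential'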
|
mit
| 3,859,655,520,245,001,700
| 39.242424
| 92
| 0.600603
| false
| 3.437824
| false
| false
| false
|
hughsons/saltwaterfish
|
admin/views.py
|
1
|
28274
|
from django.http import *
from forms import UploadForm
from django import template
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView, View
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User, Group, Permission
from models import *
from django.db import models
from django.db.models import Count, Min, Sum, Max, Avg
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import unittest
from django.db import connection, transaction
import logging
import hashlib
from google.appengine.api import files
try:
files.gs
except AttributeError:
import gs
files.gs = gs
PERPAGE=50
def checkadminlogin_dispatch(f):
def wrap(request, *args, **kwargs):
if 'IsLogin' in request.session and request.session['IsLogin'] and 'Staff' in request.session and request.session['Staff'].username !="":
staff_list = Admins.objects.filter(username = request.session['Staff_username'], pass_field = hashlib.md5(request.session['Staff_password']).hexdigest())
if staff_list:
request.session['IsLogin'] = True
request.session['Staff'] = staff_list[0]
success = True
else:
return HttpResponseRedirect('/logout')
logging.info('Fetch Started:: %s', staff_list[0])
else:
return HttpResponseRedirect('/logout')
return f(request, *args, **kwargs)
return wrap
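# Hedged illustration: besides the LoginRequiredMixin below, the decorator can
# wrap a plain function-based view directly (the view itself is made up):
#
#   @checkadminlogin_dispatch
#   def dashboard(request):
#       return render_template(request, "home-page-admin.htm", {'page_title': "Home"})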
class CsrfExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super(CsrfExemptMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
@method_decorator(checkadminlogin_dispatch)
def dispatch(self,request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
@csrf_exempt
def render_template(request, template, data=None):
errs =""
if request.method == 'GET' and 'err' in request.GET:
data.update({'errs':request.GET['err']})
response = render_to_response(template, data,
context_instance=RequestContext(request))
return response
class CMSClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Extrapages.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Extrapages.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "cms_pages.htm", content)
class CMSEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['pageid']
allpages = Extrapages.objects.get(id=pageid)
content = {'page_title': "Summary",
'allpages':allpages,
}
return render_template(request, "cms_pages_edit.htm", content)
class EmailViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Emails.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Emails.objects.all()[offset-100:offset]
content = {'page_title': "Admin :: Email List",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "email_pages.htm", content)
class EmailEditClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pageid = request.GET['id']
allpages = Emails.objects.get(id=pageid)
content = {'page_title': "Admin::Email Edit",
'allpages':allpages,
}
return render_template(request, "email_pages_edit.htm", content)
class CMSAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Summary",}
return render_template(request, "cms_pages_add.htm", content)
class TitlesContentClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
count = Html.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allpages = Html.objects.all()[offset-100:offset]
content = {'page_title': "Summary",
'allpages':allpages,
'count':count,
'page_num':page_num,
}
return render_template(request, "titles_content.htm", content)
class ProductWishListClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
#allitems = ProductWaitinglist.objects.annotate(dcount=Count('catalogid')).values('catalogid',
# 'current_stock',
# 'products__catalogid').all()[offset-100:offset]
allitems = ProductWaitinglist.objects.raw('select count(*) as dcount,product_waitinglist.catalogid,products.id,name,current_stock from product_waitinglist,products where product_waitinglist.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductWaitinglist.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_wish_list.htm", content)
class ProductWishViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
itemid = request.GET['itemid']
allitems = ProductWaitinglist.objects.filter(catalogid=itemid).all()[offset-100:offset]
count = ProductWaitinglist.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_wish_list_view_list.htm", content)
class ReviewAllClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = ProductReview.objects.raw('select count(*) as dcount,product_review.catalogid,products.id,name,thumbnail from product_review, products where product_review.catalogid=products.catalogid group by catalogid')[offset-100:offset]
count = ProductReview.objects.values('catalogid').annotate(dcount=Count('catalogid')).count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'page_num':page_num,
}
return render_template(request, "products_7_reviews.htm", content)
class ProductsReviewsViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.filter(catalogid=itemid).all()
count = ProductReview.objects.filter(catalogid=itemid).all().count()
#return HttpResponse(allitems)
content = {'page_title': "Summary",
'allitems':allitems,
'count':count,
'itemid':itemid,
}
return render_template(request, "products_review_view_list.htm", content)
class ProductsReviewEditFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
itemid = request.GET['itemid']
allitems = ProductReview.objects.get(id=itemid)
content = {'page_title': "Summary",
'allitems':allitems,
#'count':count,
#'page_num':page_num,
'itemid':itemid,
}
return render_template(request, "products_7_reviews_edit_2_edit.htm", content)
class ApanelViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Profile",}
return render_template(request, "home-page-admin.htm", content)
class CustomersViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = customers.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':customers.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "customers.htm", content)
class CRMViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if request.GET['page'] == "":
page_num = 1
else:
page_num = request.GET['page']
if 'status' in request.GET and request.GET['status'] != "":
status = request.GET['status']
else:
status = 1
count = Crm.objects.filter(status=status).count()
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Crm.objects.all().filter(status=status)[offset-100:offset],
'count':count,
'page_num':page_num,
}
return render_template(request, "crm.htm", content)
class CRMEditViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
crmid = request.GET['id']
allitems = Crm.objects.get(id=crmid)
categories = ProductCategory.objects.all()
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "crm_edit.htm", content)
class StaffViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Site Staff",
'customers':Admins.objects.all()[:100],
'count':Admins.objects.count(),}
return render_template(request, "admins.htm", content)
class CategoryViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Category.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'customers':Category.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "categories.htm", content)
class CustomerAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'title': "Add Customer",}
return render_template(request, "customer_add.htm", content)
class CustomerInfoClass(LoginRequiredMixin,TemplateView):
#summary = Customers.objects.all()
def get(self, request, *args, **kwargs):
cid = request.GET['id']
customer = customers.objects.get(contactid=cid)
customeremail= customer.email
customerrewards = CustomerRewards.objects.filter(contactid=cid).all()
totalrewards = CustomerRewards.objects.filter(contactid=cid).aggregate(Sum('points'))
#customers_promocode = SwfCustomerCreditsLog.objects.values_list('customers_promocode', flat=True)
#customers_promocode = customers_promocode['customers_promocode']
#storerewards = SwfCustomerCreditsLog.objects.filter(customers_email_address=customeremail)
storerewards = SwfCustomerCreditsLog.objects.raw('select *,swf_customer_credits_log.id as sid from swf_customer_credits_log , promotions where customers_promocode = coupon AND customers_email_address="'+customeremail+'" AND customers_promocode != ""')
fulldata = list(storerewards)
try:
wish_id = WshWishlist.objects.get(customerid=cid)
wishitems = WsiWishlistitems.objects.filter(wsh_id=wish_id.wsh_id)
except Exception as e:
wishitems = ""
content = {'page_title': "Customers Info",
'customer': customer,
'customerorders':Orders.objects.filter(ocustomerid=cid).all(),
'wishlists':wishitems,
'customerrewards':customerrewards,
'totalrewards':totalrewards,
'storerewards':fulldata,
}
#'count':Admins.objects.count(),}
return render_template(request, "customers_info.htm", content)
class ProductsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Products.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
content = {'page_title': "Profile",
'allitems':Products.objects.all()[offset-100:offset],
'count':count,
'page_num':page_num,}
return render_template(request, "products.htm", content)
class ProductViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productedit.htm", content)
class ProductRelatedClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "productrelated.htm", content)
class ProductsImagesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allitems = Products.objects.get(catalogid=pid)
categories = ProductCategory.objects.all().filter(catalogid=pid)
content = {'page_title': "Profile",
'allitems':allitems,
'manufacturers':Manufacturer.objects.all(),
'categories': categories,}
return render_template(request, "images_products.htm", content)
class ApanelViewOrdersClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
        order_status = request.GET['order_status']
        if order_status == "" or int(order_status) < 1:
            order_status = 1
count = Orders.objects.filter(order_status=order_status).count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Orders.objects.all().filter(order_status=order_status)[offset-100:offset]
order_status_links = OrderStatus.objects.all().filter(visible='1')
#crm_messages=CrmMessages.objects.select_related(crmid__orderid='8623')
#return HttpResponse(crm_messages)
content = {'page_title': "Orders",
'allitems':allitems,
'count':count,
'page_num':page_num,
'order_status':order_status,
'order_links':order_status_links,}
return render_template(request, "vieworders.htm", content)
class ApanelViewOrdersStatusClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = OrderStatus.objects.all()
content = {'page_title': "Orders Status",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "orders_status.htm", content)
class OrderPageClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
oid = request.GET['oid']
order_status_links = OrderStatus.objects.all().filter(visible='1')
allitems = Orders.objects.get(orderid=oid)
try:
transactions = Transactions.objects.get(orderid=oid)
amount = transactions.amount
totalamt = Oitems.objects.filter(orderid=oid).aggregate(Sum('unitprice'))
totalamt = totalamt['unitprice__sum']
except Exception as e:
transactions = ""
totalamt = 0
amount = 0
alloiitems = Oitems.objects.all().filter(orderid=oid)
finaltotal = (totalamt + int(allitems.oshipcost)) - allitems.coupondiscount
balance = finaltotal - amount
content = {'page_title': "Orders Status",
'allitems':allitems,
'alloiitems':alloiitems,
'order_links':order_status_links,
'totalamt':totalamt,
'finaltotal':finaltotal,
'paidamt':finaltotal,
'transactions':transactions,
'balance':balance,
}
return render_template(request, "orderpage.htm", content)
class AddAdminsFormClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
allitems = Admins.objects.all()
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ""
if "id" in request.GET:
allitems = Admins.objects.get(id=request.GET['id'])
else:
allitems = ""
content = {'page_title': "Add User",
'allitems':allitems,
'mode':mode,}
return render_template(request, "admins_add.htm", content)
class RmaPagesClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Rma.objects.count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = Rma.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "rma_pages.htm", content)
class RmaViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
rmaid=request.GET['rmaid']
allitems = Rma.objects.get(idrma=rmaid)
content = {'page_title': "View RMA",
'allitems':allitems,}
return render_template(request, "rmaview.htm", content)
class ShippingManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = ShippingCategory.objects.all()
content = {'page_title': "Admin: Shipping Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "adminshippingmanager.htm", content)
class TaxManagerViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
if "mode" in request.GET:
mode = request.GET['mode']
else:
mode = ""
allitems = Tax.objects.all()
content = {'page_title': "Admin: Tax Manager View",
'allitems':allitems,
'mode':mode,}
return render_template(request, "taxmanager.htm", content)
class GiftCertificatesViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = GiftCertificates.objects.all().count()
if request.GET['page'] == "":
page_num = 1
else:
#pages = count/100
page_num = request.GET['page']
page_num = int(page_num)
offset = page_num * 100
allitems = GiftCertificates.objects.all()[offset-100:offset]
content = {'page_title': "Admin: Gift Certificate View",
'allitems':allitems,
'page_num':page_num,
'count':count,
'order_links':OrderStatus.objects.all().filter(visible='1'),}
return render_template(request, "giftcertificate_pages.htm", content)
class EditGiftCertificateClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
giftid=request.GET['id']
allitems = GiftCertificates.objects.get(id=giftid)
total = allitems.certificate_amount + allitems.certificate_expenses
content = {'page_title': "Admin :: Edit Gift Certificate",
'allitems':allitems,
'order_links':OrderStatus.objects.all().filter(visible='1'),
'total':total}
return render_template(request, "edit_giftcertificate.htm", content)
class ProductArticleViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductArticle.objects.all().filter(catalogid=pid)
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_articles.htm", content)
class ProductArticleEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['id']
allpages = ProductArticle.objects.get(id=pid)
content = {'page_title': "Admin :: Edit Article",
'allpages':allpages,}
return render_template(request, "product_article_edit.htm", content)
class ProductArticleAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
content = {'page_title': "Admin :: Add Article",
'pid':pid,}
return render_template(request, "product_article_add.htm", content)
class ProductReviewsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
prod = Products.objects.get(catalogid=pid)
allitems = ProductReview.objects.filter(catalogid=pid).all()
count = allitems.count()
content = {'page_title': "Admin: Product Articles",
'allitems':allitems,
'prod':prod,
'count':count,
}
return render_template(request, "product_reviews.htm", content)
class ProductOptionEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
pid = request.GET['pid']
allpages = Products.objects.get(catalogid=pid)
content = {'page_title': "Admin :: Edit Options",
'allpages':allpages,
'prod':pid,}
return render_template(request, "product_options_edit.htm", content)
class BannersViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
allpages = SiteBanners.objects.all()
content = {'page_title': "Admin :: Banner Managements",
'allitems':allpages,}
return render_template(request, "viewbanners.htm", content)
class BannerEditViewClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
bid = request.GET['bid']
filename = "/gs/swf_product_images/banner/banner5.png"
allpages = SiteBanners.objects.get(id=bid)
content = {'page_title': "Admin :: Edit banner",
'allpages':allpages,
'bannerpath':filename,}
return render_template(request, "editbanner.htm", content)
class BannersAddFormClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
return render_template(request, "addbanner.htm", content)
class GCSfilesClass(LoginRequiredMixin, TemplateView):
def get(self, request, *args, **kwargs):
content = {'page_title': "Admin :: Add Banner Managements",}
file_list = files.listdir('/gs/swf_product_images')
        links = []
        for file_name in file_list:
            if '$folder$' not in file_name:
                links.append('<a href="https://storage.cloud.google.com/%s">%s</a><br>' % (file_name[4:], file_name[4:]))
        return HttpResponse(''.join(links))
        #return render_template(request, "gcsfiles.htm", content)
class CouponsViewClass(LoginRequiredMixin,TemplateView):
def get(self, request, *args, **kwargs):
count = Promotions.objects.count()
if "page" in request.GET and request.GET['page'] != "":
page_num = request.GET['page']
else:
page_num = 1
#pages = count/100
page_num = int(page_num)
offset = page_num * 100
allitems = Promotions.objects.all()[offset-100:offset]
content = {'page_title': "Orders Status",
'allitems':allitems,
'count':count,}
return render_template(request, "viewcoupons.htm", content)
|
bsd-3-clause
| -3,690,833,010,790,051,300
| 41.326347
| 266
| 0.592594
| false
| 3.99802
| false
| false
| false
|
wingtk/icbuild
|
icbuild/modtypes/msvc.py
|
1
|
3122
|
# icbuild - a tool to ease building collections of source packages
# Copyright (C) 2015 Ignacio Casal Quinteiro
#
# msvc.py: msvc module type definitions.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__metaclass__ = type
import os
from icbuild.errors import BuildStateError, CommandError, FatalError
from icbuild.modtypes import \
Package, DownloadableModule, register_module_type, MakeModule
__all__ = [ 'MSVCModule' ]
class MSVCModule(Package, DownloadableModule):
"""Base type for modules that use MSBuild build system."""
type = 'msvc'
PHASE_CHECKOUT = DownloadableModule.PHASE_CHECKOUT
PHASE_FORCE_CHECKOUT = DownloadableModule.PHASE_FORCE_CHECKOUT
PHASE_BUILD = 'build'
PHASE_INSTALL = 'install'
def __init__(self, name, branch=None,
solution='', msvcargs=''):
Package.__init__(self, name, branch=branch)
self.solution = solution
self.msvcargs = msvcargs
def get_srcdir(self, buildscript):
return self.branch.srcdir
def get_builddir(self, buildscript):
return self.get_srcdir(buildscript)
def do_build(self, buildscript):
buildscript.set_action('Building', self)
srcdir = self.get_srcdir(buildscript)
msbuild = buildscript.config.msbuild
        cmd = [msbuild, self.solution, self.msvcargs]
buildscript.execute(cmd, cwd = srcdir)
do_build.depends = [PHASE_CHECKOUT]
do_build.error_phases = [PHASE_FORCE_CHECKOUT]
def do_install(self, buildscript):
buildscript.set_action('Installing', self)
# do nothing for now
do_install.depends = [PHASE_BUILD]
def xml_tag_and_attrs(self):
return 'msvc', [('id', 'name', None)]
def collect_args(instance, node, argtype):
if node.hasAttribute(argtype):
args = node.getAttribute(argtype)
else:
args = ''
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == argtype:
if not child.hasAttribute('value'):
raise FatalError("<%s/> tag must contain value=''" % argtype)
args += ' ' + child.getAttribute('value')
return instance.eval_args(args)
def parse_msvc(node, config, uri, repositories, default_repo):
instance = MSVCModule.parse_from_xml(node, config, uri, repositories, default_repo)
instance.msvcargs = collect_args(instance, node, 'msvcargs')
return instance
register_module_type('msvc', parse_msvc)
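# Hedged illustration of a moduleset entry this parser would accept; the id,
# solution and msvcargs values are made up, and the surrounding moduleset
# syntax is assumed to follow whatever MSVCModule.parse_from_xml handles:
#
#   <msvc id="glib" solution="win32/vs14/glib.sln">
#     <msvcargs value="/p:Configuration=Release"/>
#   </msvc>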
|
gpl-2.0
| 1,607,374,235,951,469,600
| 34.078652
| 87
| 0.68802
| false
| 3.798054
| false
| false
| false
|
MarinusVL/scRNApipe
|
setup.py
|
1
|
1056
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='scRNApipe',
version='0.1.0',
description='Package for analysing scRNA-seq in Transcript Tag Counting data.',
long_description=read('README.md'),
author='Stavros Giannoukakos',
author_email='s.p.giannoukakos@hotmail.com',
packages=['scRNApipe'],
    url='https://github.com/MarinusVL/scRNApipe',
keywords=['single cell RNA analysis'],
    install_requires=['pysam>=0.8.3', 'numpy', 'multiqc', 'STAR', 'umis', 'umi_tools', 'python>=2.5,<3', 'natsort'],
dependency_links=['https://sourceforge.net/projects/subread/files/subread-1.5.2/subread-1.5.2-source.tar.gz/download',
'https://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.5_source.zip'
],
package_data = {
'': ['configuration_file.txt']
},
entry_points={
'console_scripts': ['scRNApipe = scRNApipe.scRNApipe:main']
},
)
|
mit
| 4,567,553,069,334,808,000
| 35.413793
| 122
| 0.631629
| false
| 3.133531
| false
| false
| false
|
catalpainternational/OIPA
|
OIPA/api/v2/resources/advanced_resources.py
|
1
|
7277
|
from builtins import object
from tastypie.resources import ModelResource
from geodata.models import Country, Region, City
from indicator.models import Indicator
from tastypie import fields
from tastypie.serializers import Serializer
class IndicatorFiltersResource(ModelResource):
name = fields.CharField(attribute='name')
class Meta:
queryset = Indicator.objects.all()
resource_name = 'indicator-filters'
serializer = Serializer(formats=['xml', 'json'])
excludes = ['description', 'type_data', 'selection_type', 'deprivation_type', 'rain_programme']
include_resource_uri = False
# def dehydrate(self, bundle):
# bundle.data['region_id'] = bundle.obj.country.region_id
#
#
    def dehydrate_name(self, bundle):
return bundle.data['name']
class OnlyCountryResource(ModelResource):
class Meta:
queryset = Country.objects.all().order_by('name')
include_resource_uri = False
excludes = ['center_longlat', 'dac_country_code', 'dac_region_code', 'dac_region_name', 'iso3', 'language', 'polygon']
resource_name = 'country'
limit = 1000
class OnlyRegionResource(ModelResource):
class Meta:
queryset = Region.objects.all().distinct().order_by('code')
resource_name = 'region'
include_resource_uri = False
class OnlyCityResource(ModelResource):
class Meta:
queryset = City.objects.all().order_by('name')
resource_name = 'city'
include_resource_uri = False
excludes = ['alt_name', 'ascii_name', 'geoname_id', 'location']
def dehydrate(self, bundle):
bundle.data['country'] = bundle.obj.country.code
return bundle
def apply_filters(self, request, applicable_filters):
base_object_list = super(OnlyCityResource, self).apply_filters(request, applicable_filters)
countries = request.GET.get('country', None)
filters = {}
if countries:
countries = countries.replace('|', ',').replace('-', ',').split(',')
filters.update(dict(country__iso__in=countries))
return base_object_list.filter(**filters).distinct()
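# Hedged example: the 'country' filter above accepts '|', '-' or ',' separated
# ISO codes, so a request such as ?country=AF|KE (the URL prefix depends on
# where this resource is mounted) narrows the city queryset to those countries.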
#
# class UnHabitatIndicatorCountryResource(ModelResource):
# class Meta:
# queryset = UnHabitatIndicatorCountry.objects.all()
# include_resource_uri = False
# resource_name = 'indicator-country'
# serializer = Serializer(formats=['xml', 'json'])
# filtering = {"year": ALL }
# # authentication = ApiKeyAuthentication()
#
#
# def dehydrate(self, bundle):
# bundle.data['country_iso'] = bundle.obj.country.iso
# bundle.data['country_iso3'] = bundle.obj.country.iso3
#
# bundle.data['country_name'] = bundle.obj.country.get_iso_display()
# bundle.data['dac_region_code'] = bundle.obj.country.dac_region_code
# bundle.data['dac_region_name'] = bundle.obj.country.dac_region_name
# tpset = bundle.obj.typedeprivationcountry_set.all()
# tp_list = {}
# for tp in tpset:
# temp_list = {}
# temp_list['type'] = tp.get_type_deprivation_display()
# temp_list['non_slum_household'] = tp.non_slum_household
# temp_list['slum_household'] = tp.slum_household
# temp_list['one_shelter_deprivation'] = tp.one_shelter_deprivation
# temp_list['two_shelter_deprivations'] = tp.two_shelter_deprivations
# temp_list['three_shelter_deprivations'] = tp.three_shelter_deprivations
# temp_list['four_shelter_deprivations'] = tp.four_shelter_deprivations
# temp_list['gender'] = tp.gender
# temp_list['extra_type_name'] = tp.extra_type_name
# temp_list['is_matrix'] = tp.is_matrix
# temp_list['urban'] = tp.urban
# temp_list['total'] = tp.total
# temp_list['rural'] = tp.rural
#
# tp_list['deprivation_id_'+str(tp.id)] = temp_list
# bundle.data['deprivation'] = tp_list
# bundle.data.pop('id')
#
# return bundle
#
# def apply_filters(self, request, applicable_filters):
# base_object_list = super(UnHabitatIndicatorCountryResource, self).apply_filters(request, applicable_filters)
# regions = request.GET.get('regions', None)
# countries = request.GET.get('country_name', None)
# isos = request.GET.get('iso', None)
# indicator = request.GET.get('indicator', None)
#
#
#
# filters = {}
# if regions:
# # @todo: implement smart filtering with seperator detection
# regions = regions.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__dac_region_code__in=regions))
# if countries:
# countries = countries.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__country_name__in=countries))
# if isos:
# isos = isos.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(country__iso__in=isos))
# #
#
# return base_object_list.filter(**filters).distinct()
#
#
# class UnHabitatIndicatorcityResource(ModelResource):
# class Meta:
# queryset = UnHabitatIndicatorcity.objects.all()
# include_resource_uri = False
# resource_name = 'indicator-city'
# serializer = Serializer(formats=['xml', 'json'])
# filtering = {"year": ALL }
# # authentication = ApiKeyAuthentication()
#
#
# def dehydrate(self, bundle):
# bundle.data['country_iso'] = bundle.obj.city.country.iso
# bundle.data['country_name'] = bundle.obj.city.country.get_iso_display()
# bundle.data['dac_region_code'] = bundle.obj.city.country.dac_region_code
# bundle.data['dac_region_name'] = bundle.obj.city.country.dac_region_name
# bundle.data['city_name'] = bundle.obj.city.name
#
# # bundle.data['']
#
# bundle.data.pop('id')
#
# return bundle
#
# def apply_filters(self, request, applicable_filters):
# base_object_list = super(UnHabitatIndicatorcityResource, self).apply_filters(request, applicable_filters)
# regions = request.GET.get('regions', None)
# countries = request.GET.get('country_name', None)
# isos = request.GET.get('iso', None)
# city = request.GET.get('city', None)
#
#
#
# filters = {}
# if regions:
# # @todo: implement smart filtering with seperator detection
# regions = regions.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__dac_region_code__in=regions))
# if countries:
# countries = countries.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__country_name__in=countries))
# if isos:
# isos = isos.replace('|', ',').replace('-', ',').split(',')
# filters.update(dict(city__country__iso__in=isos))
# if city:
# city = city.replace('|', ',').replace('-', ',').split(',')
#
# filters.update(dict(city__name__in=city))
#
# return base_object_list.filter(**filters).distinct()
#
#
#
|
agpl-3.0
| 8,310,001,698,274,749,000
| 36.127551
| 126
| 0.600797
| false
| 3.544569
| false
| false
| false
|
zaitcev/swift-lfs
|
test/probe/common.py
|
1
|
7421
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from httplib import HTTPConnection
from os import kill, path
from signal import SIGTERM
from subprocess import Popen, PIPE
from time import sleep, time
from swiftclient import get_auth, head_account
from swift.common.ring import Ring
from test.probe import CHECK_SERVER_TIMEOUT
def start_server(port, port2server, pids, check=True):
server = port2server[port]
if server[:-1] in ('account', 'container', 'object'):
if not path.exists('/etc/swift/%s-server/%s.conf' %
(server[:-1], server[-1])):
return None
pids[server] = Popen([
'swift-%s-server' % server[:-1],
'/etc/swift/%s-server/%s.conf' % (server[:-1], server[-1])]).pid
if check:
return check_server(port, port2server, pids)
else:
pids[server] = Popen(['swift-%s-server' % server,
'/etc/swift/%s-server.conf' % server]).pid
if check:
return check_server(port, port2server, pids)
return None
def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT):
server = port2server[port]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
return None
path = '/connect/1/2'
if server[:-1] == 'container':
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
try_until = time() + timeout
while True:
try:
conn = HTTPConnection('127.0.0.1', port)
conn.request('GET', path)
resp = conn.getresponse()
# 404 because it's a nonsense path (and mount_check is false)
# 507 in case the test target is a VM using mount_check
if resp.status not in (404, 507):
raise Exception(
'Unexpected status %s' % resp.status)
break
except Exception, err:
if time() > try_until:
print err
print 'Giving up on %s:%s after %s seconds.' % (
server, port, timeout)
raise err
sleep(0.1)
else:
try_until = time() + timeout
while True:
try:
url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
'test:tester', 'testing')
account = url.split('/')[-1]
head_account(url, token)
return url, token, account
except Exception, err:
if time() > try_until:
print err
print 'Giving up on proxy:8080 after 30 seconds.'
raise err
sleep(0.1)
return None
def kill_server(port, port2server, pids):
try:
kill(pids[port2server[port]], SIGTERM)
except Exception, err:
print err
try_until = time() + 30
while True:
try:
conn = HTTPConnection('127.0.0.1', port)
conn.request('GET', '/')
conn.getresponse()
except Exception, err:
break
if time() > try_until:
raise Exception(
'Still answering on port %s after 30 seconds' % port)
sleep(0.1)
def kill_servers(port2server, pids):
for port in port2server:
kill_server(port, port2server, pids)
def kill_nonprimary_server(primary_nodes, port2server, pids):
primary_ports = [n['port'] for n in primary_nodes]
for port, server in port2server.iteritems():
if port in primary_ports:
server_type = server[:-1]
break
else:
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for port, server in list(port2server.iteritems()):
if server[:-1] == server_type and port not in primary_ports:
kill_server(port, port2server, pids)
return port
def reset_environment():
p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
print stdout
pids = {}
try:
port2server = {}
config_dict = {}
for server, port in [('account', 6002), ('container', 6001),
('object', 6000)]:
for number in xrange(1, 9):
port2server[port + (number * 10)] = '%s%d' % (server, number)
for port in port2server:
start_server(port, port2server, pids, check=False)
for port in port2server:
check_server(port, port2server, pids)
port2server[8080] = 'proxy'
url, token, account = start_server(8080, port2server, pids)
account_ring = Ring('/etc/swift/account.ring.gz')
container_ring = Ring('/etc/swift/container.ring.gz')
object_ring = Ring('/etc/swift/object.ring.gz')
for name in ('account', 'container', 'object'):
for server in (name, '%s-replicator' % name):
config_dict[server] = '/etc/swift/%s-server/%%d.conf' % name
except BaseException:
try:
raise
finally:
try:
kill_servers(port2server, pids)
except Exception:
pass
return pids, port2server, account_ring, container_ring, object_ring, url, \
token, account, config_dict
def get_to_final_state():
processes = []
for job in ('account-replicator', 'container-replicator',
'object-replicator'):
for number in xrange(1, 9):
if not path.exists('/etc/swift/%s-server/%d.conf' %
(job.split('-')[0], number)):
continue
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
processes = []
for job in ('container-updater', 'object-updater'):
for number in xrange(1, 5):
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
processes = []
for job in ('account-replicator', 'container-replicator',
'object-replicator'):
for number in xrange(1, 9):
if not path.exists('/etc/swift/%s-server/%d.conf' %
(job.split('-')[0], number)):
continue
processes.append(Popen([
'swift-%s' % job,
'/etc/swift/%s-server/%d.conf' % (job.split('-')[0], number),
'once']))
for process in processes:
process.wait()
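# Hedged usage sketch for a probe test; the test class and method names are
# illustrative, only reset_environment() and kill_servers() come from this
# module:
#
#   class TestSomething(unittest.TestCase):
#       def setUp(self):
#           (self.pids, self.port2server, self.account_ring,
#            self.container_ring, self.object_ring, self.url, self.token,
#            self.account, self.configs) = reset_environment()
#
#       def tearDown(self):
#           kill_servers(self.port2server, self.pids)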
|
apache-2.0
| 5,316,278,556,826,849,000
| 35.377451
| 79
| 0.538741
| false
| 4.06185
| false
| false
| false
|
dhylands/bioloid3
|
bioloid/bus.py
|
1
|
9709
|
"""This module provides the Bus class which knows how to talk to Bioloid
devices, and the BusError exception which is raised when an error is
encountered.
"""
import pyb
from bioloid import packet
from bioloid.dump_mem import dump_mem
from bioloid.log import log
class BusError(Exception):
"""Exception which is raised when a non-successful status packet is received."""
def __init__(self, error_code, *args, **kwargs):
super(BusError, self).__init__(self, *args, **kwargs)
self.error_code = error_code
def get_error_code(self):
"""Retrieves the error code associated with the exception."""
return self.error_code
def __str__(self):
return "Rcvd Status: " + str(packet.ErrorCode(self.error_code))
class Bus:
"""The Bus class knows the commands used to talk to bioloid devices."""
SHOW_NONE = 0
SHOW_COMMANDS = (1 << 0)
SHOW_PACKETS = (1 << 1)
def __init__(self, serial_port, show=SHOW_NONE):
self.serial_port = serial_port
self.show = show
def action(self):
"""Broadcasts an action packet to all of the devices on the bus.
This causes all of the devices to perform their deferred writes
at the same time.
"""
if self.show & Bus.SHOW_COMMANDS:
log('Broadcasting ACTION')
self.fill_and_write_packet(packet.Id.BROADCAST, packet.Command.ACTION)
def fill_and_write_packet(self, dev_id, cmd, data=None):
"""Allocates and fills a packet. data should be a bytearray of data
to include in the packet, or None if no data should be included.
"""
packet_len = 6
if data is not None:
packet_len += len(data)
pkt_bytes = bytearray(packet_len)
pkt_bytes[0] = 0xff
pkt_bytes[1] = 0xff
pkt_bytes[2] = dev_id
pkt_bytes[3] = 2 # for len and cmd
pkt_bytes[4] = cmd
if data is not None:
pkt_bytes[3] += len(data)
pkt_bytes[5:packet_len - 1] = data
pkt_bytes[-1] = ~sum(pkt_bytes[2:-1]) & 0xff
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt_bytes, prefix=' W', show_ascii=True, log=log)
self.serial_port.write_packet(pkt_bytes)
def ping(self, dev_id):
"""Sends a PING request to a device.
        Returns True if the device responds successfully, False if a timeout
        occurs, and raises a BusError for any other failures.
"""
self.send_ping(dev_id)
try:
self.read_status_packet()
except BusError as ex:
if ex.get_error_code() == packet.ErrorCode.TIMEOUT:
return False
raise ex
return True
def read(self, dev_id, offset, num_bytes):
"""Sends a READ request and returns data read.
        Raises a BusError if any errors occur.
"""
self.send_read(dev_id, offset, num_bytes)
pkt = self.read_status_packet()
return pkt.params()
def read_status_packet(self):
"""Reads a status packet and returns it.
        Raises a bioloid.bus.BusError if an error occurs.
"""
pkt = packet.Packet(status_packet=True)
while True:
# start = pyb.micros()
byte = self.serial_port.read_byte()
if byte is None:
if self.show & Bus.SHOW_COMMANDS:
log('TIMEOUT')
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
raise BusError(packet.ErrorCode.TIMEOUT)
err = pkt.process_byte(byte)
if err != packet.ErrorCode.NOT_DONE:
break
if err != packet.ErrorCode.NONE:
err_ex = BusError(err)
if self.show & Bus.SHOW_COMMANDS:
log(err_ex)
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
raise err_ex
err = pkt.error_code()
if self.show & Bus.SHOW_COMMANDS:
log('Rcvd Status: {} from ID: {}'.format(packet.ErrorCode(err), pkt.dev_id))
if self.show & Bus.SHOW_PACKETS:
dump_mem(pkt.pkt_bytes, prefix=' R', show_ascii=True, log=log)
if err != packet.ErrorCode.NONE:
raise BusError(err)
return pkt
def reset(self, dev_id):
"""Sends a RESET request.
        Raises a BusError if any errors occur.
"""
self.send_reset(dev_id)
if dev_id == packet.Id.BROADCAST:
return packet.ErrorCode.NONE
pkt = self.read_status_packet()
return pkt.error_code()
def scan(self, start_id=0, num_ids=32, dev_found=None, dev_missing=None):
"""Scans the bus, calling devFound(self, dev) for each device
which responds, and dev_missing(self, dev) for each device
which doesn't.
Returns true if any devices were found.
"""
end_id = start_id + num_ids - 1
if end_id >= packet.Id.BROADCAST:
end_id = packet.Id.BROADCAST - 1
some_dev_found = False
for dev_id in range(start_id, end_id + 1):
if self.ping(dev_id):
some_dev_found = True
if dev_found:
dev_found(self, dev_id)
else:
if dev_missing:
dev_missing(self, dev_id)
return some_dev_found
def send_ping(self, dev_id):
"""Sends a ping to a device."""
if self.show & Bus.SHOW_COMMANDS:
log('Sending PING to ID {}'.format(dev_id))
self.fill_and_write_packet(dev_id, packet.Command.PING)
def send_read(self, dev_id, offset, num_bytes):
"""Sends a READ request to read data from the device's control
table.
"""
if self.show & Bus.SHOW_COMMANDS:
log('Sending READ to ID {} offset 0x{:02x} len {}'.format(
dev_id, offset, num_bytes))
self.fill_and_write_packet(dev_id, packet.Command.READ, bytearray((offset, num_bytes)))
def send_reset(self, dev_id):
"""Sends a RESET command to the device, which causes it to reset the
control table to factory defaults.
"""
if self.show & Bus.SHOW_COMMANDS:
if dev_id == packet.Id.BROADCAST:
log('Broadcasting RESET')
else:
log('Sending RESET to ID {}'.format(dev_id))
self.fill_and_write_packet(dev_id, packet.Command.RESET)
def send_write(self, dev_id, offset, data, deferred=False):
"""Sends a WRITE request if deferred is False, or REG_WRITE
request if deferred is True to write data into the device's
control table.
data should be an array of ints, or a bytearray.
        Deferred writes will occur when an ACTION command is broadcast.
"""
if self.show & Bus.SHOW_COMMANDS:
cmd_str = 'REG_WRITE' if deferred else 'WRITE'
if dev_id == packet.Id.BROADCAST:
log('Broadcasting {} offset 0x{:02x} len {}'.format(cmd_str, offset, len(data)))
else:
log('Sending {} to ID {} offset 0x{:02x} len {}'.format(cmd_str, dev_id, offset, len(data)))
cmd = packet.Command.REG_WRITE if deferred else packet.Command.WRITE
pkt_data = bytearray(len(data))
pkt_data[0] = offset
pkt_data[1:] = data
self.fill_and_write_packet(dev_id, cmd, pkt_data)
def sync_write(self, dev_ids, offset, values):
"""Sets up a synchroous write command.
dev_ids should be an array of device ids.
offset should be the offset that the data will be written to.
values should be an array of bytearrays. There should be one bytearray
for each dev_id, and each bytearray should be of the same length.
raises ValueError if the dimensionality of values is incorrect.
"""
if self.show & Bus.SHOW_COMMANDS:
ids = ', '.join(['{}'.format(id) for id in dev_ids])
log('Sending SYNC_WRITE to IDs {} offset 0x{:02x} len {}'.format(ids, offset, len(values[0])))
num_ids = len(dev_ids)
if num_ids != len(values):
raise ValueError('len(dev_ids) = {} must match len(values) = {}'.format(num_ids, len(values)))
bytes_per_id = len(values[0])
param_len = num_ids * (bytes_per_id + 1) + 2
data = bytearray(param_len)
data[0] = offset
data[1] = bytes_per_id
data_idx = 2
for id_idx in range(num_ids):
if len(values[id_idx]) != bytes_per_id:
raise ValueError('len(values[{}]) not equal {}'.format(id_idx, bytes_per_id))
data[data_idx] = dev_ids[id_idx]
data_idx += 1
data[data_idx:data_idx + bytes_per_id] = values[id_idx]
data_idx += bytes_per_id
self.fill_and_write_packet(packet.Id.BROADCAST, packet.Command.SYNC_WRITE, data)
def write(self, dev_id, offset, data, deferred=False):
"""Sends a WRITE request if deferred is False, or a REG_WRITE
request if deferred is True. Deferred writes will occur when
        an ACTION command is broadcast.
data should be an array of ints, or a bytearray.
        Raises a BusError if any errors occur.
"""
self.send_write(dev_id, offset, data, deferred)
if dev_id == packet.Id.BROADCAST:
return packet.ErrorCode.NONE
pkt = self.read_status_packet()
return pkt.error_code()
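# Hedged usage sketch (not part of the original module). 'serial_port' is a
# placeholder for any object exposing read_byte() and write_packet() as used
# above, and control-table offset 30 as the goal position is an assumption:
#
#   bus = Bus(serial_port, show=Bus.SHOW_COMMANDS)
#   bus.scan(dev_found=lambda b, dev_id: log('found servo {}'.format(dev_id)))
#   bus.sync_write([1, 2], 30,
#                  [bytearray((0x00, 0x02)), bytearray((0xff, 0x01))])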
|
mit
| 4,271,133,406,594,132,500
| 36.77821
| 108
| 0.576475
| false
| 3.767559
| false
| false
| false
|
ocefpaf/ulmo
|
ulmo/cuahsi/wof/core.py
|
1
|
11413
|
"""
ulmo.wof.core
~~~~~~~~~~~~~
This module provides direct access to `CUAHSI WaterOneFlow`_ web services.
.. _CUAHSI WaterOneFlow: http://his.cuahsi.org/wofws.html
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
import io
import suds.client
import isodate
from ulmo import util
from ulmo import waterml
_suds_client = None
def get_sites(wsdl_url, suds_cache=("default",)):
"""
Retrieves information on the sites that are available from a WaterOneFlow
service using a GetSites request. For more detailed information including
which variables and time periods are available for a given site, use
``get_site_info()``.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
sites_dict : dict
a python dict with site codes mapped to site information
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == '1.0':
response = suds_client.service.GetSitesXml('')
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_site_infos(response_buffer)
elif waterml_version == '1.1':
response = suds_client.service.GetSites('')
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_site_infos(response_buffer)
return dict([
(site['network'] + ':' + site['code'], site)
for site in list(sites.values())
])
def get_site_info(wsdl_url, site_code, suds_cache=("default",)):
"""
Retrieves detailed site information from a WaterOneFlow service using a
GetSiteInfo request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get more information for. Site codes
MUST contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
site_info : dict
a python dict containing site information
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
if waterml_version == '1.0':
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_0.parse_sites(response_buffer)
elif waterml_version == '1.1':
response = suds_client.service.GetSiteInfo(site_code)
response_buffer = io.BytesIO(util.to_bytes(response))
sites = waterml.v1_1.parse_sites(response_buffer)
if len(sites) == 0:
return {}
site_info = list(sites.values())[0]
series_dict = dict([
(series['variable']['vocabulary'] + ':' + series['variable']['code'],
series)
for series in site_info['series']
])
site_info['series'] = series_dict
return site_info
def get_values(wsdl_url, site_code, variable_code, start=None, end=None, suds_cache=("default",)):
"""
Retrieves site values from a WaterOneFlow service using a GetValues request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
site_code : str
Site code of the site you'd like to get values for. Site codes MUST
contain the network and be of the form <network>:<site_code>, as is
required by WaterOneFlow.
variable_code : str
Variable code of the variable you'd like to get values for. Variable
codes MUST contain the network and be of the form
<vocabulary>:<variable_code>, as is required by WaterOneFlow.
start : ``None`` or datetime (see :ref:`dates-and-times`)
Start of a date range for a query. If both start and end parameters are
omitted, the entire time series available will be returned.
end : ``None`` or datetime (see :ref:`dates-and-times`)
End of a date range for a query. If both start and end parameters are
omitted, the entire time series available will be returned.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
site_values : dict
a python dict containing values
"""
suds_client = _get_client(wsdl_url, suds_cache)
# Note from Emilio:
# Not clear if WOF servers really do handle time zones (time offsets or
# "Z" in the iso8601 datetime strings. In the past, I (Emilio) have
# passed naive strings to GetValues(). if a datetime object is passed to
# this ulmo function, the isodate code above will include it in the
# resulting iso8601 string; if not, no. Test effect of dt_isostr having
# a timezone code or offset, vs not having it (the latter, naive dt
# strings, is what I've been using all along)
# the interpretation of start and end time zone is server-dependent
start_dt_isostr = None
end_dt_isostr = None
if start is not None:
start_datetime = util.convert_datetime(start)
start_dt_isostr = isodate.datetime_isoformat(start_datetime)
if end is not None:
end_datetime = util.convert_datetime(end)
end_dt_isostr = isodate.datetime_isoformat(end_datetime)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetValues(
site_code, variable_code, startDate=start_dt_isostr,
endDate=end_dt_isostr)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == '1.0':
values = waterml.v1_0.parse_site_values(response_buffer)
elif waterml_version == '1.1':
values = waterml.v1_1.parse_site_values(response_buffer)
    if variable_code is not None:
        return list(values.values())[0]
    else:
        return values
def get_variable_info(wsdl_url, variable_code=None, suds_cache=("default",)):
"""
Retrieves site values from a WaterOneFlow service using a GetVariableInfo
request.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
variable_code : `None` or str
If `None` (default) then information on all variables will be returned,
otherwise, this should be set to the variable code of the variable you'd
like to get more information on. Variable codes MUST contain the
network and be of the form <vocabulary>:<variable_code>, as is required
by WaterOneFlow.
suds_cache: ``None`` or tuple
SOAP local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the default duration (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
variable_info : dict
a python dict containing variable information. If no variable code is
`None` (default) then this will be a nested set of dicts keyed by
<vocabulary>:<variable_code>
"""
suds_client = _get_client(wsdl_url, suds_cache)
waterml_version = _waterml_version(suds_client)
response = suds_client.service.GetVariableInfo(variable_code)
response_buffer = io.BytesIO(util.to_bytes(response))
if waterml_version == '1.0':
variable_info = waterml.v1_0.parse_variables(response_buffer)
elif waterml_version == '1.1':
variable_info = waterml.v1_1.parse_variables(response_buffer)
    if variable_code is not None and len(variable_info) == 1:
return list(variable_info.values())[0]
else:
return dict([
('%s:%s' % (var['vocabulary'], var['code']), var)
for var in list(variable_info.values())
])
def _waterml_version(suds_client):
tns_str = str(suds_client.wsdl.tns[1])
if tns_str == 'http://www.cuahsi.org/his/1.0/ws/':
return '1.0'
elif tns_str == 'http://www.cuahsi.org/his/1.1/ws/':
return '1.1'
else:
raise NotImplementedError(
"only WaterOneFlow 1.0 and 1.1 are currently supported")
def _get_client(wsdl_url, cache_duration=("default",)):
"""
Open and re-use (persist) a suds.client.Client instance _suds_client throughout
the session, to minimize WOF server impact and improve performance. _suds_client
is global in scope.
Parameters
----------
wsdl_url : str
URL of a service's web service definition language (WSDL) description.
All WaterOneFlow services publish a WSDL description and this url is the
entry point to the service.
cache_duration: ``None`` or tuple
suds client local cache duration for WSDL description and client object.
Pass a cache duration tuple like ('days', 3) to set a custom duration.
Duration may be in months, weeks, days, hours, or seconds.
If unspecified, the suds default (1 day) will be used.
Use ``None`` to turn off caching.
Returns
-------
_suds_client : suds Client
Newly or previously instantiated (reused) suds Client object.
"""
global _suds_client
# Handle new or changed client request (create new client)
if _suds_client is None or _suds_client.wsdl.url != wsdl_url:
_suds_client = suds.client.Client(wsdl_url)
if cache_duration is None:
_suds_client.set_options(cache=None)
else:
cache = _suds_client.options.cache
# could add some error catching ...
if cache_duration[0] == "default":
cache.setduration(days=1)
else:
cache.setduration(**dict([cache_duration]))
return _suds_client
|
bsd-3-clause
| 6,928,622,012,715,005,000
| 37.819728
| 98
| 0.651625
| false
| 3.932805
| false
| false
| false
|
frank2/paranoia
|
lib/base/event.py
|
1
|
2058
|
#!/usr/bin/env python
import inspect
from paranoia.base.paranoia_agent import ParanoiaAgent, ParanoiaError
__all__ = ['get_event_base', 'EventError', 'Event', 'InstantiateEvent'
,'SetPropertyEvent', 'NewAddressEvent', 'NewShiftEvent', 'NewSizeEvent'
,'SetValueEvent', 'DeclareSubregionEvent', 'MoveSubregionEvent'
,'RemoveSubregionEvent']
class EventError(ParanoiaError):
pass
def get_event_base(event_class):
if isinstance(event_class, Event):
event_class = event_class.__class__
if not inspect.isclass(event_class):
raise EventError('event class must be a class')
if not issubclass(event_class, Event):
raise EventError('class must derive Event')
if event_class == Event:
raise EventError('cannot get base of root class')
base_class = event_class
while not Event in base_class.__bases__:
base_class = base_class.__bases__[0]
return base_class
class Event(ParanoiaAgent):
def __call__(self, *args):
raise NotImplementedError
class InstantiateEvent(Event):
def __call__(self, decl, instance, kwargs):
raise NotImplementedError
class SetPropertyEvent(Event):
def __call__(self, decl, prop, value):
raise NotImplementedError
class NewAddressEvent(Event):
def __call__(self, decl, address, shift):
raise NotImplementedError
class NewShiftEvent(Event):
def __call__(self, decl, shift):
raise NotImplementedError
class NewSizeEvent(Event):
def __call__(self, decl, old_size, new_size):
raise NotImplementedError
class SetValueEvent(Event):
def __call__(self, decl, value):
raise NotImplementedError
class DeclareSubregionEvent(Event):
def __call__(self, decl, subregion):
raise NotImplementedError
class MoveSubregionEvent(Event):
def __call__(self, decl, old_offset, new_offset):
raise NotImplementedError
class RemoveSubregionEvent(Event):
def __call__(self, decl, subregion):
raise NotImplementedError
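# --- Usage sketch (not part of the original module): get_event_base() walks an
# Event subclass back to the class whose direct base is Event itself.  The
# handler below is a made-up illustration, not a class from this package.
class _ExamplePrintValueEvent(SetValueEvent):
    def __call__(self, decl, value):
        print('set %r on %r' % (value, decl))
def _example_event_base():
    # Both a class and an instance are accepted; either way the SetValueEvent
    # branch of the hierarchy is returned.
    assert get_event_base(_ExamplePrintValueEvent) is SetValueEvent
    assert get_event_base(_ExamplePrintValueEvent()) is SetValueEvent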
|
gpl-3.0
| -8,133,959,076,121,812,000
| 27.191781
| 82
| 0.674441
| false
| 4.051181
| false
| false
| false
|
peterbrittain/asciimatics
|
samples/treeview.py
|
1
|
2992
|
#!/usr/bin/env python3
from asciimatics.event import KeyboardEvent
from asciimatics.widgets import Frame, Layout, FileBrowser, Widget, Label, PopUpDialog, Text, \
Divider
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, StopApplication
import sys
import os
try:
import magic
except ImportError:
pass
class DemoFrame(Frame):
def __init__(self, screen):
super(DemoFrame, self).__init__(
screen, screen.height, screen.width, has_border=False, name="My Form")
# Create the (very simple) form layout...
layout = Layout([1], fill_frame=True)
self.add_layout(layout)
# Now populate it with the widgets we want to use.
self._details = Text()
self._details.disabled = True
self._details.custom_colour = "field"
self._list = FileBrowser(Widget.FILL_FRAME,
os.path.abspath("."),
name="mc_list",
on_select=self.popup,
on_change=self.details)
layout.add_widget(Label("Local disk browser sample"))
layout.add_widget(Divider())
layout.add_widget(self._list)
layout.add_widget(Divider())
layout.add_widget(self._details)
layout.add_widget(Label("Press Enter to select or `q` to quit."))
# Prepare the Frame for use.
self.fix()
def popup(self):
# Just confirm whenever the user actually selects something.
self._scene.add_effect(
PopUpDialog(self._screen, "You selected: {}".format(self._list.value), ["OK"]))
def details(self):
# If python magic is installed, provide a little more detail of the current file.
if self._list.value:
if os.path.isdir(self._list.value):
self._details.value = "Directory"
elif os.path.isfile(self._list.value):
try:
self._details.value = magic.from_file(self._list.value)
except NameError:
self._details.value = "File (run 'pip install python-magic' for more details)"
else:
self._details.value = "--"
def process_event(self, event):
# Do the key handling for this Frame.
if isinstance(event, KeyboardEvent):
if event.key_code in [ord('q'), ord('Q'), Screen.ctrl("c")]:
raise StopApplication("User quit")
# Now pass on to lower levels for normal handling of the event.
return super(DemoFrame, self).process_event(event)
def demo(screen, old_scene):
screen.play([Scene([DemoFrame(screen)], -1)], stop_on_resize=True, start_scene=old_scene)
last_scene = None
while True:
try:
Screen.wrapper(demo, catch_interrupt=False, arguments=[last_scene])
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene
|
apache-2.0
| 4,689,903,952,881,384,000
| 35.048193
| 98
| 0.602273
| false
| 4.081855
| false
| false
| false
|
simbtrix/screenmix
|
screenmix/ackModel/ack.py
|
1
|
1349
|
'''
Created on 01.08.2016
@author: mkennert
'''
from kivy.properties import ObjectProperty
from kivy.uix.gridlayout import GridLayout
from ackModel.ackRect import AckRect
class Ack(GridLayout):
'''
    Ack contains all acks from the different shapes. It manages which ack
    should be shown in the ack-menu, which is appended to the cross-section shape.
'''
# all acks of the application
ackRect = ObjectProperty()
#####################################################
# here you can add more ack's. When you add one more #
# make sure, that the ack has a show method like the #
# show_ack_rect #
#####################################################
# constructor
def __init__(self, **kwargs):
super(Ack, self).__init__(**kwargs)
self.cols = 1
# default ack is the ack of the rectangle shape
self.ackRect = AckRect()
self.content = self.ackRect
self.add_widget(self.content)
'''
show the ack of the shape rectangle
'''
def show_ack_rect(self):
# remove the old content
self.remove_widget(self.content)
self.add_widget(self.ackRect)
        # save the new ack as content
self.content = self.ackRect
|
gpl-3.0
| -1,378,046,519,291,623,700
| 27.326087
| 79
| 0.535211
| false
| 4.452145
| false
| false
| false
|
sfu-natlang/HMM-Aligner
|
src/models/Old/IBM1WithAlignmentType.py
|
1
|
5456
|
# -*- coding: utf-8 -*-
#
# IBM model 1 with alignment type implementation of HMM Aligner
# Simon Fraser University
# NLP Lab
#
# This is the implementation of IBM model 1 word aligner with alignment type.
#
from collections import defaultdict
from loggers import logging
from models.IBM1Base import AlignmentModelBase as IBM1Base
from evaluators.evaluator import evaluate
__version__ = "0.4a"
class AlignmentModel(IBM1Base):
def __init__(self):
self.modelName = "IBM1WithPOSTagAndAlignmentType"
self.version = "0.2b"
self.logger = logging.getLogger('IBM1')
self.evaluate = evaluate
self.fe = ()
self.s = defaultdict(list)
self.sTag = defaultdict(list)
self.index = 0
self.typeList = []
self.typeIndex = {}
self.typeDist = []
self.lambd = 1 - 1e-20
self.lambda1 = 0.9999999999
self.lambda2 = 9.999900827395436E-11
self.lambda3 = 1.000000082740371E-15
self.loadTypeDist = {"SEM": .401, "FUN": .264, "PDE": .004,
"CDE": .004, "MDE": .012, "GIS": .205,
"GIF": .031, "COI": .008, "TIN": .003,
"NTR": .086, "MTA": .002}
self.modelComponents = ["t", "s", "sTag",
"typeList", "typeIndex", "typeDist",
"lambd", "lambda1", "lambda2", "lambda3"]
IBM1Base.__init__(self)
return
def _beginningOfIteration(self):
self.c = defaultdict(float)
self.total = defaultdict(float)
self.c_feh = defaultdict(
lambda: [0.0 for h in range(len(self.typeList))])
return
def _updateCount(self, fWord, eWord, z, index):
tPr_z = self.tProbability(fWord, eWord) / z
self.c[(fWord[self.index], eWord[self.index])] += tPr_z
self.total[eWord[self.index]] += tPr_z
c_feh = self.c_feh[(fWord[self.index], eWord[self.index])]
for h in range(len(self.typeIndex)):
c_feh[h] += tPr_z * self.sProbability(fWord, eWord, h)
return
def _updateEndOfIteration(self):
for (f, e) in self.c:
self.t[(f, e)] = self.c[(f, e)] / self.total[e]
s = self.s if self.index == 0 else self.sTag
for f, e in self.c_feh:
c_feh = self.c_feh[(f, e)]
s_tmp = s[(f, e)]
for h in range(len(self.typeIndex)):
s_tmp[h] = c_feh[h] / self.c[(f, e)]
return
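    # Note on the interpolation below (not from the original source): it mixes
    # three estimates of the alignment-type distribution -- the word-level
    # table s, the POS-tag-level table sTag and the global prior typeDist --
    # weighted by lambda1/lambda2/lambda3, with each table smoothed towards the
    # prior by lambd.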
def sProbability(self, f, e, h):
fWord, fTag = f
eWord, eTag = e
if self.fe != (f, e):
self.fe, sKey, sTagKey = (f, e), (f[0], e[0]), (f[1], e[1])
self.sTmp = self.s[sKey] if sKey in self.s else None
self.sTagTmp = self.sTag[sTagKey] if sTagKey in self.sTag else None
sTmp = self.sTmp[h] if self.sTmp else 0
sTagTmp = self.sTagTmp[h] if self.sTagTmp else 0
if self.index == 0:
p1 = (1 - self.lambd) * self.typeDist[h] + self.lambd * sTmp
p2 = (1 - self.lambd) * self.typeDist[h] + self.lambd * sTagTmp
p3 = self.typeDist[h]
return self.lambda1 * p1 + self.lambda2 * p2 + self.lambda3 * p3
else:
return (1 - self.lambd) * self.typeDist[h] + self.lambd * sTagTmp
def tProbability(self, f, e):
return IBM1Base.tProbability(self, f, e, self.index)
def decodeSentence(self, sentence):
f, e, align = sentence
sentenceAlignment = []
for i in range(len(f)):
max_ts = 0
argmax = -1
bestType = -1
for j in range(len(e)):
                t = self.tProbability(f[i], e[j])
for h in range(len(self.typeIndex)):
s = self.sProbability(f[i], e[j], h)
if t * s > max_ts:
max_ts = t * s
argmax = j
bestType = h
sentenceAlignment.append(
(i + 1, argmax + 1, self.typeList[bestType]))
return sentenceAlignment
def trainStage1(self, dataset, iterations=5):
self.logger.info("Stage 1 Start Training with POS Tags")
self.logger.info("Initialising model with POS Tags")
# self.index set to 1 means training with POS Tag
self.index = 1
self.initialiseBiwordCount(dataset, self.index)
self.sTag = self.calculateS(dataset, self.fe_count, self.index)
self.logger.info("Initialisation complete")
self.EM(dataset, iterations, 'IBM1TypeS1')
# reset self.index to 0
self.index = 0
self.logger.info("Stage 1 Complete")
return
def trainStage2(self, dataset, iterations=5):
self.logger.info("Stage 2 Start Training with FORM")
self.logger.info("Initialising model with FORM")
self.initialiseBiwordCount(dataset, self.index)
self.s = self.calculateS(dataset, self.fe_count, self.index)
self.logger.info("Initialisation complete")
self.EM(dataset, iterations, 'IBM1TypeS2')
self.logger.info("Stage 2 Complete")
return
def train(self, dataset, iterations=5):
self.logger.info("Initialising Alignment Type Distribution")
self.initialiseAlignTypeDist(dataset, self.loadTypeDist)
self.trainStage1(dataset, iterations)
self.trainStage2(dataset, iterations)
return
|
mit
| -7,192,015,633,586,471,000
| 37.422535
| 79
| 0.559751
| false
| 3.353411
| false
| false
| false
|
SMMAR11/smmarbsence
|
app/forms/admin.py
|
1
|
4838
|
# coding: utf-8
# Imports
from app.models import *
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
class FGroupeUtilisateur(forms.ModelForm) :
    # Field
util = forms.ModelMultipleChoiceField(
label = 'Utilisateurs composant le groupe',
queryset = TUtilisateur.objects.order_by('username'),
required = False,
widget = FilteredSelectMultiple('T_UTILISATEUR', is_stacked = False)
)
class Meta :
fields = '__all__'
model = TGroupeUtilisateur
def __init__(self, *args, **kwargs) :
super(FGroupeUtilisateur, self).__init__(*args, **kwargs)
        # Set the initial value of the custom field
if self.instance.get_pk() :
self.fields['util'].initial = [u.get_pk() for u in self.instance.get_util_set().all()]
def save(self, *args, **kwargs) :
        # Create/update a TGroupeUtilisateur instance
obj = super(FGroupeUtilisateur, self).save(*args, **kwargs)
obj.save()
        # Link with the t_groupes_utilisateur table
obj.get_gpe_util_set().all().delete()
for u in self.cleaned_data.get('util') : TGroupesUtilisateur.objects.create(id_gpe_util = obj, id_util = u)
return obj
class FUtilisateurCreate(forms.ModelForm) :
    # Fields
zs_password = forms.CharField(label = 'Mot de passe', widget = forms.PasswordInput())
zs_password_bis = forms.CharField(label = 'Confirmation du mot de passe', widget = forms.PasswordInput())
class Meta :
fields = [
'email',
'first_name',
'is_active',
'is_staff',
'is_superuser',
'last_name',
'username'
]
labels = { 'email' : 'Courriel principal', 'last_name' : 'Nom de famille' }
model = TUtilisateur
def __init__(self, *args, **kwargs) :
        # Initialise the arguments
self.kw_test = kwargs.pop('kw_test', False)
super(FUtilisateurCreate, self).__init__(*args, **kwargs)
        # Mark some fields as required
self.fields['email'].required = True
self.fields['first_name'].required = True
self.fields['last_name'].required = True
def clean_zs_password_bis(self) :
        # Store the form data
val_password = self.cleaned_data.get('zs_password')
val_password_bis = self.cleaned_data.get('zs_password_bis')
        # Raise an error if the passwords do not match
if val_password and val_password_bis and val_password != val_password_bis :
raise forms.ValidationError('Les mots de passe saisis ne correspondent pas.')
def save(self, *args, **kwargs) :
        # Create a TUtilisateur instance
obj = super(FUtilisateurCreate, self).save(*args, **kwargs)
obj.set_password(self.cleaned_data.get('zs_password'))
obj.save()
        # Mandatory link with the t_roles_utilisateur table
if 'A' not in obj.get_type_util__list() :
TRolesUtilisateur.objects.create(code_type_util = TTypeUtilisateur.objects.get(pk = 'A'), id_util = obj)
return obj
class FUtilisateurUpdate(forms.ModelForm) :
# Import
from django.contrib.auth.forms import ReadOnlyPasswordHashField
    # Field
password = ReadOnlyPasswordHashField(
help_text = '''
Les mots de passe ne sont pas enregistrés en clair, ce qui ne permet pas d'afficher le mot de passe de cet
utilisateur, mais il est possible de le changer en utilisant <a href="../password/">ce formulaire</a>.
''',
label = 'Mot de passe'
)
class Meta :
fields = [
'email',
'first_name',
'is_active',
'is_staff',
'is_superuser',
'last_name',
'username'
]
labels = { 'email' : 'Courriel principal', 'last_name' : 'Nom de famille' }
model = TUtilisateur
def __init__(self, *args, **kwargs) :
super(FUtilisateurUpdate, self).__init__(*args, **kwargs)
        # Mark some fields as required
self.fields['email'].required = True
self.fields['first_name'].required = True
self.fields['last_name'].required = True
def clean_password(self) : return self.initial['password']
def save(self, *args, **kwargs) :
        # Update a TUtilisateur instance
obj = super(FUtilisateurUpdate, self).save(*args, **kwargs).save()
        # Mandatory link with the t_roles_utilisateur table
if 'A' not in obj.get_type_util__list() :
TRolesUtilisateur.objects.create(code_type_util = TTypeUtilisateur.objects.get(pk = 'A'), id_util = obj)
return obj
|
gpl-3.0
| 1,337,630,901,324,975,600
| 33.248227
| 116
| 0.612676
| false
| 3.428977
| false
| false
| false
|
Gram21/ctfcode
|
pwnit.py
|
1
|
4547
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Python exploit template.
#
# Author: Jan Keim aka Gram21, Gramarye
from pwn import *
####################################
# Target System #
####################################
# Server Connection
target = "localhost"
port = 1337
# Process Connection
#binary = "./binary"
# Context: i386/amd64/... and linux/freebsd/windows
context.update(arch='i386', os='linux')
####################################
# Settings #
####################################
# Set the context level to debug:
DEBUG = False
# Set if recv should automatically print
PRINTER = True
# Std timeout for the connection
TIMEOUT = 2
# Std print color. None means no extra color
STD_COL = None
####################################
# Colors #
####################################
class col:
BLACK = '30'
RED = '31'
GREEN = '32'
BROWN = '33'
YELLOW = '33'
BLUE = '34'
MAGENTA = '35'
CYAN = '36'
WHITE = '37'
CLEAR = '0'
UNDERLINE = '4'
BOLD = '1'
ESCAPE_START = '\033['
ESCAPE_END = 'm'
####################################
# print methods #
####################################
"""method to print a string more pretty"""
def prettyprint(s, color=STD_COL):
if color == None:
print s
else:
# TODO differentiate between printable and "hex"?
coloring = col.ESCAPE_START + color + col.ESCAPE_END
clear = col.ESCAPE_START + col.CLEAR + col.ESCAPE_END
print coloring + s + clear
def print_good(s):
prettyprint(s, color=col.GREEN)
def print_bad(s):
prettyprint(s, color=col.RED)
def print_info(s):
prettyprint(s, color=col.YELLOW)
def print_bold(s):
prettyprint(s, color=col.BOLD)
def print_underline(s):
prettyprint(s, color=col.UNDERLINE)
####################################
# convenience wrappers #
####################################
def send(s=""):
r.send(s)
"""send with a newline at the end"""
def sendline(s=""):
r.sendline(s)
"""recvuntil then send"""
def sendafter(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendafter(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recvuntil then sendline"""
def sendlineafter(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendlineafter(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""sendline and then recvuntil"""
def sendlinethen(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendlinethen(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
"""send and then recvuntil"""
def sendthen(delim, data, shallprint=PRINTER, color=STD_COL):
tmp = r.sendthen(delim, data)
if shallprint:
prettyprint(tmp, color)
return tmp
def recv(shallprint=PRINTER, color=STD_COL):
tmp = r.recv()
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until a newline is found"""
def recvline(shallprint=PRINTER, color=STD_COL):
tmp = r.recvline()
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until s appeared. drop s if drop=true"""
def recvuntil(s, shallprint=PRINTER, drop=False, color=STD_COL):
tmp = r.recvuntil(s,drop)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv n bytes"""
def recvn(n, shallprint=PRINTER, color=STD_COL):
tmp = r.recvn(n)
if shallprint:
prettyprint(tmp, color)
return tmp
"""recv until regex is found"""
def recvregex(regex, shallprint=PRINTER, exact=False, color=STD_COL):
tmp = r.recvregex(regex, exact)
if shallprint:
prettyprint(tmp, color)
return tmp
####################################
# PWN #
####################################
if DEBUG:
context.log_level = 'debug'
# Connect to target
r = remote(target, port, timeout=TIMEOUT)
# Connect to process
#r = process(binary)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Your code here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Things that can be used:
# sleep(1)
# pause()/pause(n) -> waits for user input or n seconds
# cyclic(100), cyclic_find("aaaa")
# p32(0xdeadbeef), u32(s), p32(0xdeadbeef, endian='big') etc.
# asm(shellcraft.sh()) or similar
def pwn():
pass
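# --- Usage sketch (not part of the original template): a typical pwn() body
# drives the target through the wrappers above.  The prompt string, offset and
# address below are made-up placeholders.
def example_pwn():
    sendlineafter("name? ", "gram21")       # answer a prompt
    leak = recvline(False).strip()          # grab a leaked line quietly
    log.info("leaked: %s" % leak)
    payload = cyclic(64) + p32(0xdeadbeef)  # hypothetical offset of 64 bytes
    sendline(payload)
    r.interactive()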
# start the pwn
if __name__ == "__main__":
pause() # requires user input to start (e.g. waiting for server etc)
pwn()
# When there is a shell
# r.interactive()
|
mit
| -3,380,402,577,287,018,500
| 23.058201
| 72
| 0.553992
| false
| 3.489639
| false
| false
| false
|
informatik-mannheim/Moduro-CC3D
|
Simulation/Steppable/ModuroSteppable.py
|
1
|
1244
|
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Markus Gumbel"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "m.gumbel@hs-mannheim.de"
__status__ = "Production"
from PySteppables import SteppableBasePy
class ModuroSteppable(SteppableBasePy):
def __init__(self, simulator, model, _frequency=1):
SteppableBasePy.__init__(self, simulator, _frequency)
self.model = model
self.execConfig = model.execConfig
def step(self, mcs):
if not self.execConfig.interuptMCS(mcs):
self.moduroStep(mcs) # better: not MCS but time!
# Abstract method:
def moduroStep(self, mcs):
return None
|
apache-2.0
| -3,116,914,015,083,291,600
| 34.542857
| 76
| 0.694534
| false
| 3.585014
| false
| false
| false
|
jkoelker/investing
|
picloud/magicformula/predict.py
|
1
|
3948
|
#!/usr/bin/env python
import argparse
import sys
import MySQLdb
import pandas as pd
import twitter
def publish_to_twitter(df, prefix='MF', api=None, **kwargs):
if api is None:
api = twitter.Api(**kwargs)
msg = ' '.join(['$%s' % s for s in df.T.index])
msg = '%s: %s' % (prefix, msg)
if len(msg) > 140:
return publish_to_twitter(df[:-1], prefix, api, **kwargs)
return api.PostUpdate(msg)
def rank_stocks(df):
roa_key = 'roa_ttm'
pe_key = 'trailing_pe'
roa_rank = 'return_rank'
pe_rank = 'pe_rank'
df[pe_rank] = df[pe_key].rank(method='min')
df[roa_rank] = df[roa_key].rank(method='min', ascending=0)
return df.sort_index(by=[pe_rank, roa_rank], ascending=[1, 1])
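# --- Illustration (not part of the original script): rank_stocks() orders
# candidates by cheapest trailing P/E first, with the descending ROA rank as
# the tie-breaker.  The tickers and numbers below are made up.
def _example_ranking():
    demo = pd.DataFrame(
        {'roa_ttm': [0.45, 0.30, 0.60],
         'trailing_pe': [8.0, 6.5, 12.0]},
        index=['AAA', 'BBB', 'CCC'])
    # BBB has the lowest P/E so it sorts first; CCC has the best ROA but the
    # highest P/E, so it sorts last.
    return rank_stocks(demo)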
def get_stocks(db_kwargs):
qry = """
SELECT t.ticker, f.*, MAX(f.refresh_dt)
FROM fundamentals f, tickers t
WHERE f.ticker_id = t.id
AND f.refresh_dt BETWEEN DATE_SUB(NOW(), INTERVAL 1 WEEK) AND NOW()
AND t.sector NOT IN ('Financial', 'Utilities')
AND t.industry NOT IN ('Independent Oil & Gas',
'Major Integrated Oil & Gas',
'Oil & Gas Drilling & Exploration'
'Oil & Gas Equipment & Services',
'Oil & Gas Pipelines',
'Oil & Gas Refining & Marketing')
AND f.roa_ttm >= 0.25
AND f.trailing_pe >= 5
AND f.market_cap >= 30000000
GROUP BY f.ticker_id
"""
conn = MySQLdb.connect(**db_kwargs)
df = pd.read_sql(qry, conn, index_col='ticker')
conn.close()
return df
def predict(num_stocks, db_kwargs, twitter_kwargs):
stocks = get_stocks(db_kwargs)
rank = rank_stocks(stocks)
return publish_to_twitter(rank[:num_stocks].T, **twitter_kwargs)
def main():
parser = argparse.ArgumentParser(description='Run MagicFormula Prediction',
add_help=False)
parser.add_argument('-k', '--consumer-key',
required=True,
help='Twitter application consumer key')
parser.add_argument('-s', '--consumer-secret',
required=True,
help='Twitter application consumer secret')
parser.add_argument('-K', '--access-token-key',
required=True,
help='Twitter User access token key')
parser.add_argument('-S', '--access-token-secret',
required=True,
help='Twitter User access token secret')
parser.add_argument('-n', '--num_stocks',
default=15,
type=int,
help='Number of stocks to publish')
parser.add_argument('-h', '--host',
required=True,
help='MySQL host')
parser.add_argument('-u', '--user',
required=True,
help='MySQL User')
parser.add_argument('-p', '--password',
required=True,
help='MySQL password')
parser.add_argument('database',
help='Database to store tickers in')
parser.add_argument('--help',
action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
args = parser.parse_args()
db_kwargs = {'host': args.host,
'user': args.user,
'passwd': args.password,
'db': args.database}
twitter_kwargs = {'consumer_key': args.consumer_key,
'consumer_secret': args.consumer_secret,
'access_token_key': args.access_token_key,
'access_token_secret': args.access_token_secret}
if predict(args.num_stocks, db_kwargs, twitter_kwargs):
return 0
return 1
if __name__ == '__main__':
sys.exit(main())
|
mit
| 5,124,949,464,170,065,000
| 32.457627
| 79
| 0.526342
| false
| 3.851707
| false
| false
| false
|
mucximilian/gimpmaps
|
gimprenderer/sketching/geometry.py
|
1
|
17067
|
'''
Created on Jun 11, 2015
@author: mucx
# TO DO:
- Adding classes:
- Point?
- Polygon
- Multipolygon support
'''
from __future__ import division
from abc import ABCMeta, abstractmethod
import math
import sys
class Geometry(object):
"""
An abstract class defining the base geometry object
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
Constructor
"""
class Line(Geometry):
"""
An abstract class defining the connection between points
"""
__metaclass__ = ABCMeta
def __init__(self, coordinates):
"""
        :param coordinates: A list of line point coordinate tuples.
"""
# Check that line consists only of two points
if len(coordinates) > 2:
print coordinates
sys.exit("Too many points for simple line - interrupted.")
else:
self.coords = coordinates
def as_wkt(self, line):
"""
        Returns the line as a WKT LINESTRING string.
"""
line_wkt = "LINESTRING ("
for p in self.coords:
line_wkt += str(p[0]) + " " + str(p[1]) + ", "
line_wkt = line_wkt[:-2] + ")"
return line_wkt
@abstractmethod
def length(self):
raise NotImplementedError
class LineSimple(Line):
"""
A class defining the straight connection between two points. The point
    that is closer to the origin is the first point.
"""
def __init__(self, coordinates):
"""
:param coordinates: A list of coordinate tuples
"""
super(LineSimple, self).__init__(coordinates)
def length(self):
"""
Calculates the distance between the two line points using the
pythagorean theorem.
"""
d_x = math.fabs(self.coords[0][0] - self.coords[1][0])
d_y = math.fabs(self.coords[0][1] - self.coords[1][1])
l = math.sqrt(d_x**2 + d_y**2)
return l
def vector(self):
x = self.coords[1][0] - self.coords[0][0]
y = self.coords[1][1] - self.coords[0][1]
return (x, y)
def vector_orthogonal(self):
"""
Calculates an orthogonal vector to the line using the dot product.
Two vectors are orthogonal when their dot product is zero.
"""
v1 = self.vector()
v2 = None
try:
v2_y = -v1[0] / v1[1]
v2 = (1, v2_y)
except ZeroDivisionError:
v2 = (0, 1)
return v2
def get_delta(self):
"""
Returns the x or y distance between the two line points based on the
        equation parameter (which determines the slope of the line)
"""
delta = None
eq_params = self.get_line_equation_params()
if eq_params is not None:
delta = self.coords[0][0] - self.coords[1][0] # delta x
else:
delta = self.coords[0][1] - self.coords[1][1] # delta y
return delta
def get_line_equation_params(self):
"""
Identifies the line equation y = mx + b for a line which is determined
by two points.
:param line: Line class determining a line by two points (coordinate
tuple array)
"""
x1 = self.coords[0][0]
y1 = self.coords[0][1]
x2 = self.coords[1][0]
y2 = self.coords[1][1]
delta_x = x1 - x2
delta_y = y1 - y2
if (delta_x == 0):
return None # Vertical line
else:
m = (delta_y)/(delta_x)
b = y1 - m * x1
return [m,b]
def point_at_line_pos(self, p, reverse = False):
"""
Calculating the point at the position t * AB on the line from point A
to point B.
        :param p: Relative position between A and B (0 is at A, 0.5 middle, 1 is at B)
:param reverse: False, position between A and B, True between B and A.
Default is False
"""
a = self.coords[0]
b = self.coords[1]
p1 = None
p2 = None
if reverse:
p1 = b
p2 = a
else:
p1 = a
p2 = b
x = (1-p) * p1[0] + p * p2[0];
y = (1-p) * p1[1] + p * p2[1];
return (x,y)
def point_orthogonal(self, pos, d):
"""
        Displaces a point P which is located on a line at a relative position
        pos between A and B orthogonally by a distance d.
:param pos: Relative position of the point between A and B (0...1)
:param d: Distance the point is displaced orthogonally
"""
p = self.point_at_line_pos(pos)
v = self.vector_orthogonal()
shift = [(p[0], p[1]), (v[0] + p[0], v[1] + p[1])]
shift_line = LineSimple(shift)
p_displaced = shift_line.point_shifted(d)
return p_displaced
def point_shifted(self, d):
"""
Computes the point that is on the straight line between A and B and
the distance d away from B.
:param line: Tuple of two coordinate pairs determining the line points.
"""
line_vector = self.vector()
length = self.length()
shift = tuple((d / length) * x for x in line_vector)
point_shifted = tuple(sum(t) for t in zip(self.coords[0], shift))
return point_shifted
def line_scale(self, d_abs = None, d_rel = None):
"""
Equally scaling (extending or shortening at both endpoints) the line
either with using a relative or absolute value. Returns the new
endpoints as a tuple.
        :param d_abs: Absolute distance by which the line is extended
        :param d_rel: Distance relative to the line length
"""
d = 0
if (d_abs is not None and d_rel is None):
d = d_abs
elif (d_rel is not None and d_abs is None):
d = d_rel * self.length()
else:
d = d_abs
print "Two d values provied for line scaling - absolute value used"
a_new = self.point_shifted(-d)
# Using reversed line coordinates
coords_reversed = self.coords[::-1]
line_reversed = LineSimple(coords_reversed)
b_new = line_reversed.point_shifted(-d)
return (a_new, b_new)
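# --- Worked example (not part of the original module): for the horizontal
# segment A=(0, 0), B=(4, 0) the LineSimple helpers above give easily checked
# values.
def _example_line_simple():
    line = LineSimple([(0.0, 0.0), (4.0, 0.0)])
    assert line.length() == 4.0
    assert line.vector() == (4.0, 0.0)
    assert line.vector_orthogonal() == (0, 1)          # vertical direction
    assert line.point_at_line_pos(0.5) == (2.0, 0.0)   # midpoint of AB
    # point_shifted() walks from A along the direction of AB.
    assert line.point_shifted(1.0) == (1.0, 0.0)
    # Midpoint displaced 2 units at right angles to AB.
    assert line.point_orthogonal(0.5, 2.0) == (2.0, 2.0)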
class LineString(Line):
def __init__(self, coordinates):
"""
:param coordinates: A list of coordinate tuples
"""
self.coords = coordinates
self.curve = None
def length(self):
length_total = 0
for i in range(1, len(self.coords)):
line = LineSimple([self.coords[i], self.coords[i - 1]])
length_total += line.length()
return length_total
def simple_bezier(self, t = 1.0):
"""
Returns a Bezier curve in SVG from a sequence of points and control
points in an array.
"""
def get_controlpoints(point_triple, t = 1.0):
"""
Given three consecutive points on a line (P0, P1, P2), this function
calculates the Bezier control points of P1 using the technique
explained by Rob Spencer.
Source: http://scaledinnovation.com/analytics/splines/aboutSplines.html
"""
x0 = point_triple[0][0]
y0 = point_triple[0][1]
x1 = point_triple[1][0]
y1 = point_triple[1][1]
x2 = point_triple[2][0]
y2 = point_triple[2][1]
d01 = math.sqrt(math.pow(x1 - x0, 2) + math.pow(y1 - y0, 2))
d12 = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))
fa = t * d01 / (d01 + d12) # scaling factor for triangle Ta
fb = t * d12 / (d01 + d12) # ditto for Tb, simplifies to fb=t-fa
p1x = x1 - fa * (x2 - x0) # x2-x0 is the width of triangle T
p1y = y1 - fa * (y2 - y0) # y2-y0 is the height of T
p2x = x1 + fb * (x2 - x0)
p2y = y1 + fb * (y2 - y0)
return [[p1x,p1y],[p2x,p2y]];
########################################################################
controlpoints = []
controlpoints.append([self.coords[0][0], self.coords[0][1]])
for i in range(1, len(self.coords)-1):
point_triple = [self.coords[i-1], self.coords[i], self.coords[i+1]]
cps_point = get_controlpoints(point_triple, t)
controlpoints.append([cps_point[0][0], cps_point[0][1]])
controlpoints.append([cps_point[1][0], cps_point[1][1]])
last = len(self.coords)-1
controlpoints.append([self.coords[last][0], self.coords[last][1]])
        curve = self.get_curve(controlpoints)
self.curve = curve
return curve
def catmull_rom_bezier(self, t = 1.0):
"""
Returns a SVG Bezier curve of a line with the given points.
Source: http://schepers.cc/getting-to-the-point
Catmull-Rom to Cubic Bezier conversion matrix
0 1 0 0
-1/6 1 1/6 0
0 1/6 1 -1/6
0 0 1 0
"""
controlpoints = []
point_count = len(self.coords)
for i in range(0, point_count-1):
# Creating an array of relevant knot points
p = []
if ( 0 == i ):
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+2][0], self.coords[i+2][1]])
elif (len(self.coords) - 2 == i ):
p.append([self.coords[i-1][0], self.coords[i-1][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
else:
p.append([self.coords[i-1][0], self.coords[i-1][1]])
p.append([self.coords[i][0], self.coords[i][1]])
p.append([self.coords[i+1][0], self.coords[i+1][1]])
p.append([self.coords[i+2][0], self.coords[i+2][1]])
# Calculating the bezier points from the knot points
bp = [];
# This assignment is for readability only
x0 = p[0][0]
y0 = p[0][1]
x1 = p[1][0]
y1= p[1][1]
x2 = p[2][0]
y2 = p[2][1]
x3 = p[3][0]
y3= p[3][1]
# Using the factor t as "tension control"
f = (1 / t) * 6
bp.append([x1, y1])
bp.append([
((-x0 + f*x1 + x2) / f),
((-y0 + f*y1 + y2) / f)
])
bp.append([
((x1 + f*x2 - x3) / f),
((y1 + f*y2 - y3) / f)
])
bp.append([x2, y2])
controlpoints.append([bp[1][0], bp[1][1]])
controlpoints.append([bp[2][0], bp[2][1]])
#print controlpoints
curve = self.get_curve(controlpoints)
self.curve = curve
return curve
def get_curve(self, cps):
"""
Creates a coordinate array of points and control points that can be
used as a SVG path.
:param cps: An array of control points coordinates.
"""
# Checking every linepoint after the start point for two control points
if (len(self.coords) - 1) != (len(cps) / 2):
print "coords: " + str(len(self.coords))
print "cps: " + str(len(cps))
sys.exit("Curve cannot be created - control point error:")
else:
# Adding first point
curve = [self.coords[0]]
# Adding remaining points
for i in range(0, len(self.coords) -1):
cp_pos = i * 2
curve.append(cps[cp_pos])
curve.append(cps[cp_pos + 1])
curve.append(self.coords[i + 1])
return curve
class Polygon(object):
"""
Classdocs
"""
def __init__(self, linearrings):
"""
:param coordinates: A list of coordinate tuples
"""
self.linearrings = linearrings
def disjoin(self, angle_disjoin = 120.0):
"""
        Disjoins polygon linestrings into segments at vertices where the angle
        between the lines from the vertex to the vertex behind and the vertex
        to the vertex ahead is at or below a given threshold. Returns the
        calculated line segments as an array.
:param polygon: Input geometry, array of lines (arrays of coordinates)
:param angle_disjoin: Threshold angle for disjoin in degree.
"""
def three_point_angle(points):
"""
Calculates the angle between the lines from a vertex to the vertex
behind and the vertex to the vertex ahead.
:param points: Coordinate array, containing three points
(vertex behind, vertex, vertex ahead)
"""
angle = 0
try:
p0 = points[0] # point_behind
p1 = points[1] # point_center
p2 = points[2] # point_ahead
a = (p1[0] - p0[0])**2 + (p1[1] - p0[1])**2
b = (p1[0] - p2[0])**2 + (p1[1] - p2[1])**2
c = (p2[0] - p0[0])**2 + (p2[1] - p0[1])**2
angle = math.acos((a + b - c) / math.sqrt(4 * a * b)) * 180/math.pi
"""
# Determine whether the edges are convex or concave
v1 = LineSimple([p0, p1]).vector()
v2 = LineSimple([p1, p2]).vector()
det = v1[0]*v2[1] - v2[0]*v1[1] # det is negative if concave
if det < 0:
angle = 360 - angle
Nice but useless...
"""
except ZeroDivisionError:
print "Angle is zero...probably duplicate points"
return angle
########################################################################
outline_segments = []
# Get linearrings of multipolygons
for linearring in self.linearrings:
segment = []
segment.append(linearring[0])
# Iterate over all points of linearring
for i in range(1, len(linearring) -1):
points = []
points.append(linearring[i - 1])
points.append(linearring[i])
points.append(linearring[i + 1])
angle = three_point_angle(points)
# Check if duplicate points exist (due to coordinate rounding)
if (angle == 0):
# Skipping duplicate points
if linearring[i] == linearring[i + 1]:
continue
if linearring[i] == linearring[i - 1]:
continue
# Continue segment
if (angle > angle_disjoin):
segment.append(linearring[i])
# Finish segment and create new one
else:
segment.append(linearring[i])
outline_segments.append(segment)
segment = []
segment.append(linearring[i])
segment.append(linearring[0])
outline_segments.append(segment)
return outline_segments
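# --- Illustration (not part of the original module): disjoin() starts a new
# segment wherever the vertex angle is at or below the threshold, so for a
# closed unit square every 90-degree corner splits and the four edges come back
# as four separate two-point segments.
def _example_disjoin():
    square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0), (0.0, 0.0)]
    segments = Polygon([square]).disjoin()
    assert len(segments) == 4
    return segments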
|
gpl-2.0
| 3,766,000,800,359,044,000
| 30.089253
| 84
| 0.457315
| false
| 4.103631
| false
| false
| false
|
chrsrds/scikit-learn
|
examples/model_selection/plot_grid_search_digits.py
|
7
|
2706
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters, scoring='%s_macro' % score
)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
|
bsd-3-clause
| 537,184,166,535,805,440
| 33.692308
| 78
| 0.656689
| false
| 3.816643
| true
| false
| false
|
TresysTechnology/setools
|
setoolsgui/rolemodel.py
|
1
|
2087
|
# Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette, QTextCursor
from setools.policyrep.exception import MLSDisabled
from .details import DetailsPopup
from .models import SEToolsTableModel
def role_detail(parent, role):
"""
Create a dialog box for role details.
Parameters:
parent The parent Qt Widget
role The role
"""
detail = DetailsPopup(parent, "Role detail: {0}".format(role))
types = sorted(role.types())
detail.append_header("Types ({0}): ".format(len(types)))
for t in types:
detail.append(" {0}".format(t))
detail.show()
class RoleTableModel(SEToolsTableModel):
"""Table-based model for roles."""
headers = ["Name", "Types"]
def data(self, index, role):
# There are two roles here.
# The parameter, role, is the Qt role
# The below item is a role in the list.
if self.resultlist and index.isValid():
row = index.row()
col = index.column()
item = self.resultlist[row]
if role == Qt.DisplayRole:
if col == 0:
return str(item)
elif col == 1:
return ", ".join(sorted(str(t) for t in item.types()))
elif role == Qt.UserRole:
# get the whole object
return item
|
lgpl-2.1
| -2,426,512,159,329,464,300
| 28.814286
| 74
| 0.638237
| false
| 4.060311
| false
| false
| false
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/theano/gof/tests/test_toolbox.py
|
2
|
2301
|
from __future__ import absolute_import, print_function, division
from theano.gof.graph import Variable, Apply
from theano.gof.type import Type
from theano.gof.op import Op
from theano.gof.fg import FunctionGraph
from theano.gof.toolbox import NodeFinder
def as_variable(x):
assert isinstance(x, Variable)
return x
class MyType(Type):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __eq__(self, other):
return isinstance(other, MyType)
def MyVariable(name):
return Variable(MyType(name), None, None)
class MyOp(Op):
__props__ = ("nin", "name")
def __init__(self, nin, name):
self.nin = nin
self.name = name
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = list(map(as_variable, inputs))
for input in inputs:
if not isinstance(input.type, MyType):
raise Exception("Error 1")
outputs = [MyType(self.name + "_R")()]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
sigmoid = MyOp(1, 'Sigmoid')
add = MyOp(2, 'Add')
dot = MyOp(2, 'Dot')
def inputs():
x = MyVariable('x')
y = MyVariable('y')
z = MyVariable('z')
return x, y, z
class TestNodeFinder:
def test_straightforward(self):
x, y, z = inputs()
e0 = dot(y, z)
e = add(add(sigmoid(x), sigmoid(sigmoid(z))), dot(add(x, y), e0))
g = FunctionGraph([x, y, z], [e], clone=False)
g.attach_feature(NodeFinder())
assert hasattr(g, 'get_nodes')
for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
if not len([t for t in g.get_nodes(type)]) == num:
raise Exception("Expected: %i times %s" % (num, type))
new_e0 = add(y, z)
assert e0.owner in g.get_nodes(dot)
assert new_e0.owner not in g.get_nodes(add)
g.replace(e0, new_e0)
assert e0.owner not in g.get_nodes(dot)
assert new_e0.owner in g.get_nodes(add)
for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
if not len([t for t in g.get_nodes(type)]) == num:
raise Exception("Expected: %i times %s" % (num, type))
|
agpl-3.0
| -5,614,573,700,113,669,000
| 25.147727
| 73
| 0.571491
| false
| 3.277778
| false
| false
| false
|
davidfischer/rpc4django
|
docs/conf.py
|
2
|
6764
|
# -*- coding: utf-8 -*-
#
# RPC4Django documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 17 14:31:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(BASE_DIR)
# Get __version__ without loading rpc4django module
ns = {}
version_path = os.path.join(BASE_DIR, "rpc4django/version.py")
with open(version_path, "r", encoding="utf-8") as version_file:
exec(version_file.read(), ns)
rpc4django_version = ns["__version__"]
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RPC4Django'
copyright = u'%d, the respective authors' %datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = rpc4django_version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'RPC4Djangodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RPC4Django.tex', u'RPC4Django Documentation',
u'David Fischer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
bsd-3-clause
| -7,409,543,119,766,741,000
| 31.363636
| 80
| 0.713779
| false
| 3.708333
| true
| false
| false
|
c-square/homework
|
Licență/Anul III/CN/gauss/scripts/deploy.py
|
1
|
1156
|
#! /usr/bin/env python
""" Deploys a .pth file in site-packages for easy importing """
import distutils.sysconfig
import os
def deploy():
"""Deploy gauss"""
site = distutils.sysconfig.get_python_lib()
pth = os.path.join(site, 'gauss.pth')
if os.path.exists(pth):
print("[i] Module already exists!") # pylint: disable=C0325
else:
dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        print(dirname)
try:
with open(pth, 'w') as stream:
stream.write(dirname)
except IOError:
# pylint: disable=C0325
print("[x] Please run this script with superuser privileges.")
return
print("[i] Testing module...") # pylint: disable=C0325
try:
import gauss # pylint: disable=W0612
except ImportError as exc:
# pylint: disable=C0325
print("Failed to deploy module! {}".format(exc))
else:
# pylint: disable=C0325
print("[i] Module was successfully installed!")
if __name__ == "__main__":
deploy()
|
mit
| 3,215,219,101,553,088,000
| 30.243243
| 77
| 0.553633
| false
| 4.173285
| false
| false
| false
|
jolyonb/edx-platform
|
lms/djangoapps/discussion/rest_api/tests/test_api.py
|
1
|
128040
|
"""
Tests for Discussion API internal interface
"""
import itertools
from datetime import datetime, timedelta
from urllib import urlencode
from urlparse import parse_qs, urlparse, urlunparse
import ddt
import httpretty
import mock
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from opaque_keys.edx.locator import CourseLocator
from pytz import UTC
from rest_framework.exceptions import PermissionDenied
from common.test.utils import MockSignalHandlerMixin, disable_signal
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from lms.djangoapps.discussion.rest_api import api
from lms.djangoapps.discussion.rest_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread,
get_thread_list,
update_comment,
update_thread
)
from lms.djangoapps.discussion.rest_api.exceptions import (
CommentNotFoundError, DiscussionDisabledError, ThreadNotFoundError,
)
from lms.djangoapps.discussion.rest_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
make_paginated_api_response
)
from lms.djangoapps.discussion.django_comment_client.tests.utils import ForumsEnableMixin
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.lib.exceptions import CourseNotFoundError, PageNotFoundError
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the xblock.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id)
def _discussion_disabled_course_for(user):
"""
Create and return a course with discussions disabled.
The user passed in will be enrolled in the course.
"""
course_with_disabled_forums = CourseFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id)
_remove_discussion_tab(course_with_disabled_forums, user.id)
return course_with_disabled_forums
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name):
"""
Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions`
set to True (which is no longer the default value). Then 1) enrolls the user in that course,
2) creates a cohort that the user is placed in, and 3) adds the user to the given role.
Returns: a tuple of the created course and the created cohort
"""
cohort_course = CourseFactory.create(
cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True}
)
CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [user]
return [cohort_course, cohort]
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTest(ForumsEnableMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_course"""
@classmethod
def setUpClass(cls):
super(GetCourseTest, cls).setUpClass()
cls.course = CourseFactory.create(org="x", course="y", run="z")
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
get_course(self.request, _discussion_disabled_course_for(self.user).id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"following_thread_list_url": (
"http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
),
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTestBlackouts(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Tests of get_course for courses that have blackout dates.
"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTestBlackouts, self).setUp()
self.course = CourseFactory.create(org="x", course="y", run="z")
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_blackout(self):
# A variety of formats is accepted
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
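# Each entry above should be normalized to an ISO 8601 UTC range in the API result, regardless of input format.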
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTopicsTest(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_xblock(self, topic_id, category, subcategory, **kwargs):
"""
Build a discussion xblock in self.course.
"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
def get_course_topics(self):
"""
Get course topics for self.course using self.request (and therefore self.user),
generating absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(DiscussionDisabledError):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_xblock("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "A", "1")
self.make_discussion_xblock("courseware-2", "A", "2")
self.make_discussion_xblock("courseware-3", "B", "1")
self.make_discussion_xblock("courseware-4", "B", "2")
self.make_discussion_xblock("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_xblock("courseware-1", "First", "A", sort_key="D")
self.make_discussion_xblock("courseware-2", "First", "B", sort_key="B")
self.make_discussion_xblock("courseware-3", "First", "C", sort_key="E")
self.make_discussion_xblock("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_xblock("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_xblock("courseware-6", "Second", "C")
self.make_discussion_xblock("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
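# Place self.user in Cohort A and the beta tester in Cohort B, linking each cohort to its partition group.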
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.make_discussion_xblock("courseware-1", "First", "Everybody")
self.make_discussion_xblock(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_xblock(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_xblock("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_xblock(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
def test_discussion_topic(self):
"""
Tests discussion topic details against a requested topic id
"""
topic_id_1 = "topic_id_1"
topic_id_2 = "topic_id_2"
self.make_discussion_xblock(topic_id_1, "test_category_1", "test_target_1")
self.make_discussion_xblock(topic_id_2, "test_category_2", "test_target_2")
actual = get_course_topics(self.request, self.course.id, {"topic_id_1", "topic_id_2"})
self.assertEqual(
actual,
{
"non_courseware_topics": [],
"courseware_topics": [
{
"children": [{
"children": [],
"id": "topic_id_1",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_target_1"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_1",
"name": "test_category_1"
},
{
"children":
[{
"children": [],
"id": "topic_id_2",
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_target_2"
}],
"id": None,
"thread_list_url": "http://testserver/api/discussion/v1/threads/?"
"course_id=x%2Fy%2Fz&topic_id=topic_id_2",
"name": "test_category_2"
}
]
}
)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetThreadListTest(ForumsEnableMixin, CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_thread_list"""
@classmethod
def setUpClass(cls):
super(GetThreadListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.course.cohort_config = {"cohorted": False}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_thread_list([])
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
self.get_thread_list([], course=_discussion_disabled_course_for(self.user))
def test_empty(self):
self.assertEqual(
self.get_thread_list([], num_pages=0).data,
{
"pagination": {
"next": None,
"previous": None,
"num_pages": 0,
"count": 0
},
"results": [],
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["1"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["6"],
"per_page": ["14"],
})
def test_thread_content(self):
self.course.cohort_config = {"cohorted": True}
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
source_threads = [
make_minimal_cs_thread({
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"username": self.author.username,
"user_id": str(self.author.id),
"title": "Test Title",
"body": "Test body",
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
"endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
make_minimal_cs_thread({
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"username": self.author.username,
"user_id": str(self.author.id),
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"votes": {"up_count": 9},
"comments_count": 18,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
})
]
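# In the expected data below, comment_count is comments_count + 1 (the post itself counts as a
# contribution), and group_name is filled in from the cohort for the second thread.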
expected_threads = [
self.expected_thread_data({
"id": "test_thread_id_0",
"author": self.author.username,
"topic_id": "topic_x",
"vote_count": 4,
"comment_count": 6,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"has_endorsed": True,
"read": True,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
}),
self.expected_thread_data({
"id": "test_thread_id_1",
"author": self.author.username,
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"vote_count": 9,
"comment_count": 19,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
}),
]
expected_result = make_paginated_api_response(
results=expected_threads, count=2, num_pages=1, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list(source_threads).data,
expected_result
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
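# A group_id filter should only be added for unprivileged students in a cohorted course.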
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link="http://testserver/test_path?page=2", previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[],
count=0,
num_pages=3,
next_link="http://testserver/test_path?page=3",
previous_link="http://testserver/test_path?page=1"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link=None, previous_link="http://testserver/test_path?page=2"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3).data,
expected_result
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(PageNotFoundError):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": text_search_rewrite})
self.register_get_threads_search_response([], text_search_rewrite, num_pages=0)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
).data,
expected_result
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["10"],
"text": ["test search string"],
})
def test_following(self):
self.register_subscribed_threads_response(self.user, [], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
following=True,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/users/{}/subscribed_threads".format(self.user.id)
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
@ddt.data("unanswered", "unread")
def test_view_query(self, query):
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
view=query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
query: ["true"],
})
@ddt.data(
("last_activity_at", "activity"),
("comment_count", "comments"),
("vote_count", "votes")
)
@ddt.unpack
def test_order_by_query(self, http_query, cc_query):
"""
Tests the order_by parameter
Arguments:
http_query (str): Query string sent in the HTTP request
cc_query (str): Query string used for the comments client service
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_by=http_query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": [cc_query],
"page": ["1"],
"per_page": ["11"],
})
def test_order_direction(self):
"""
Only "desc" is supported for order. Also, since it is simply swallowed,
it isn't included in the params.
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="desc",
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"page": ["1"],
"per_page": ["11"],
})
def test_invalid_order_direction(self):
"""
Test with invalid order_direction (e.g. "asc")
"""
with self.assertRaises(ValidationError) as assertion:
self.register_get_threads_response([], page=1, num_pages=0)
get_thread_list( # pylint: disable=expression-not-assigned
self.request,
self.course.id,
page=1,
page_size=11,
order_direction="asc",
).data
self.assertIn("order_direction", assertion.exception.message_dict)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCommentListTest(ForumsEnableMixin, CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Test for get_comment_list"""
@classmethod
def setUpClass(cls):
super(GetCommentListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(ThreadNotFoundError):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
with self.assertRaises(DiscussionDisabledError):
self.get_comment_list(
self.make_minimal_cs_thread(
overrides={"course_id": unicode(disabled_course.id)}
)
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment({"username": self.user.username})],
"resp_total": 71
}),
page=6,
page_size=14
)
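# page=6 with page_size=14 skips the first five full pages: resp_skip = 5 * 14 = 70.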
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"user_id": [str(self.user.id)],
"mark_as_read": ["False"],
"recursive": ["False"],
"resp_skip": ["70"],
"resp_limit": ["14"],
"with_responses": ["True"],
}
)
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"child_count": 0,
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"child_count": 0,
"children": [],
}
]
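# The second source comment is anonymous and was flagged by self.user, so below it serializes
# with author=None and abuse_flagged=True.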
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
).data["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment", "username": self.user.username})],
"non_endorsed_responses": [make_minimal_cs_comment({
"id": "non_endorsed_comment", "username": self.user.username
})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True).data
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False).data
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"username": self.user.username,
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"},
})
]
})
actual_comments = self.get_comment_list(thread).data["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment({"username": self.user.username})],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5).data
self.assertIsNone(actual["pagination"]["next"])
self.assertIsNone(actual["pagination"]["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["pagination"]["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2).data
self.assertIsNone(actual["pagination"]["next"])
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({
"id": "comment_{}".format(i),
"username": self.user.username
}) for i in range(10)]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size).data
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["pagination"]["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["pagination"]["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
@ddt.ddt
@disable_signal(api, 'thread_created')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_thread"""
LONG_TITLE = (
'Lorem ipsum dolor sit amet, consectetuer adipiscing elit. '
'Aenean commodo ligula eget dolor. Aenean massa. Cum sociis '
'natoque penatibus et magnis dis parturient montes, nascetur '
'ridiculus mus. Donec quam felis, ultricies nec, '
'pellentesque eu, pretium quis, sem. Nulla consequat massa '
'quis enim. Donec pede justo, fringilla vel, aliquet nec, '
'vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet '
'a, venenatis vitae, justo. Nullam dictum felis eu pede '
'mollis pretium. Integer tincidunt. Cras dapibus. Vivamus '
'elementum semper nisi. Aenean vulputate eleifend tellus. '
'Aenean leo ligula, porttitor eu, consequat vitae, eleifend '
'ac, enim. Aliquam lorem ante, dapibus in, viverra quis, '
'feugiat a, tellus. Phasellus viverra nulla ut metus varius '
'laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies '
'nisi vel augue. Curabitur ullamcorper ultricies nisi. Nam '
'eget dui. Etiam rhoncus. Maecenas tempus, tellus eget '
'condimentum rhoncus, sem quam semper libero, sit amet '
'adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, '
'luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et '
'ante tincidunt tempus. Donec vitae sapien ut libero '
'venenatis faucibus. Nullam quis ante. Etiam sit amet orci '
'eget eros faucibus tincidunt. Duis leo. Sed fringilla '
'mauris sit amet nibh. Donec sodales sagittis magna. Sed '
'consequat, leo eget bibendum sodales, augue velit cursus '
'nunc, quis gravida magna mi a libero. Fusce vulputate '
'eleifend sapien. Vestibulum purus quam, scelerisque ut, '
'mollis sed, nonummy id, metus. Nullam accumsan lorem in '
'dui. Cras ultricies mi eu turpis hendrerit fringilla. '
'Vestibulum ante ipsum primis in faucibus orci luctus et '
'ultrices posuere cubilia Curae; In ac dui quis mi '
'consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu '
'tortor, suscipit eget, imperdiet nec, imperdiet iaculis, '
'ipsum. Sed aliquam ultrices mauris. Integer ante arcu, '
'accumsan a, consectetuer eget, posuere ut, mauris. Praesent '
'adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc '
'nonummy metus.'
)
@classmethod
def setUpClass(cls):
super(CreateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_thread(self.request, self.minimal_data)
expected = self.expected_thread_data({
"id": "test_id",
"course_id": unicode(self.course.id),
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"read": True,
})
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"title_truncated": False,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@mock.patch("eventtracking.tracker.emit")
def test_title_truncation(self, mock_emit):
data = self.minimal_data.copy()
data['title'] = self.LONG_TITLE
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"read": True,
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
create_thread(self.request, data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": self.LONG_TITLE[:1000],
"title_truncated": True,
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group_set", "group_is_none", "group_is_set"],
)
)
@ddt.unpack
def test_group_id(self, role_name, course_is_cohorted, topic_is_cohorted, data_group_state):
"""
Tests whether the user has permission to create a thread with certain
group_id values.
Omitting group_id is always allowed. Explicitly passing a group_id (even None) is only
allowed when the course is cohorted and the user has a privileged role; otherwise a
ValidationError is expected.
"""
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
if course_is_cohorted:
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_post_thread_response({"username": self.user.username})
data = self.minimal_data.copy()
data["course_id"] = unicode(cohort_course.id)
if data_group_state == "group_is_none":
data["group_id"] = None
elif data_group_state == "group_is_set":
if course_is_cohorted:
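# Deliberately pick a group other than the user's own cohort.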
data["group_id"] = cohort.id + 1
else:
data["group_id"] = 1 # Set to any value since there is no cohort
expected_error = (
data_group_state in ["group_is_none", "group_is_set"] and
(not course_is_cohorted or role_name == FORUM_ROLE_STUDENT)
)
try:
create_thread(self.request, data)
self.assertFalse(expected_error)
actual_post_data = httpretty.last_request().parsed_body
if data_group_state == "group_is_set":
self.assertEqual(actual_post_data["group_id"], [str(data["group_id"])])
elif data_group_state == "no_group_set" and course_is_cohorted and topic_is_cohorted:
self.assertEqual(actual_post_data["group_id"], [str(cohort.id)])
else:
self.assertNotIn("group_id", actual_post_data)
except ValidationError as ex:
if not expected_error:
self.fail(u"Unexpected validation error: {}".format(ex))
def test_following(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
def test_voted(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'thread_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_thread_response({"id": "test_id", "username": self.user.username})
self.register_thread_flag_response("test_id")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, {"course_id": "non/existent/course"})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.minimal_data["course_id"] = unicode(disabled_course.id)
with self.assertRaises(DiscussionDisabledError):
create_thread(self.request, self.minimal_data)
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@ddt.ddt
@disable_signal(api, 'comment_created')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_comment"""
@classmethod
def setUpClass(cls):
super(CreateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
with self.assert_signal_sent(api, 'comment_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
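# Replies to an existing comment are POSTed to that comment's endpoint; top-level responses
# go to the thread's comments endpoint.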
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
)
)
@ddt.unpack
def test_endorsed(self, role_name, is_thread_author, thread_type):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"thread_type": thread_type,
"user_id": str(self.user.id) if is_thread_author else str(self.user.id + 1),
})
)
self.register_post_comment_response({"username": self.user.username}, "test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(not is_thread_author or thread_type == "discussion")
)
try:
create_comment(self.request, data)
self.assertEqual(httpretty.last_request().parsed_body["endorsed"], ["True"])
self.assertFalse(expected_error)
except ValidationError:
self.assertTrue(expected_error)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'comment_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_comment_response({"id": "test_comment", "username": self.user.username}, "test_thread")
self.register_comment_flag_response("test_comment")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
create_comment(self.request, self.minimal_data)
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(disabled_course.id),
"commentable_id": "test_topic",
})
)
with self.assertRaises(DiscussionDisabledError):
create_comment(self.request, self.minimal_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({"username": self.user.username}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@ddt.ddt
@disable_signal(api, 'thread_edited')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_thread"""
@classmethod
def setUpClass(cls):
super(UpdateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
self.assertEqual(actual, self.expected_thread_data({
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"topic_id": "original_topic",
"read": True,
"title": "Original Title",
}))
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
"read": ["False"],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the thread should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_vote_status}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'original_topic',
'id': 'test_thread'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
#setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
starting_vote_count = 1
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": starting_vote_count}})
#first vote
data = {"voted": first_vote}
result = update_thread(self.request, "test_thread", data)
self.register_thread(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
#second vote
data = {"voted": second_vote}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
#setup
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_thread"])
vote_count += 1
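        # Apply each user's vote in turn; the expected vote_count only changes
        # when a user's new vote differs from that user's current vote.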
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_thread(request, "test_thread", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the thread should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_thread_flag_response("test_thread")
self.register_thread({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/threads/test_thread/abuse_flag"
unflag_url = "/api/v1/threads/test_thread/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field may not be blank."]}
)
@ddt.ddt
@disable_signal(api, 'comment_edited')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_comment"""
@classmethod
def setUpClass(cls):
super(UpdateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None, course=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
if course is None:
course = self.course
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
with self.assert_signal_sent(api, 'comment_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(CommentNotFoundError):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
self.register_comment(course=_discussion_disabled_course_for(self.user))
with self.assertRaises(DiscussionDisabledError):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the comment should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": new_vote_status}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if new_vote_status else 0)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.response.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'dummy',
'id': 'test_comment'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
Tests vote_count increases and decreases correctly from the same user
"""
#setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
starting_vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": starting_vote_count}})
#first vote
data = {"voted": first_vote}
result = update_comment(self.request, "test_comment", data)
self.register_comment(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
#second vote
data = {"voted": second_vote}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
Tests vote_count increases and decreases correctly from different users
"""
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_comment"])
vote_count += 1
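        # As in the thread test above, each user's vote is applied in turn and
        # the expected count moves only when that user's vote actually changes.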
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_comment(request, "test_comment", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the comment should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_comment_flag_response("test_comment")
self.register_comment({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/comments/test_comment/abuse_flag"
unflag_url = "/api/v1/comments/test_comment/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
@ddt.ddt
@disable_signal(api, 'thread_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_thread"""
@classmethod
def setUpClass(cls):
super(DeleteThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread
All privileged roles are able to delete a thread. A student can only
delete a thread if the student is the author and the thread is either
not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@disable_signal(api, 'comment_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteCommentTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_comment"""
@classmethod
def setUpClass(cls):
super(DeleteCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
with self.assert_signal_sent(api, 'comment_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(CommentNotFoundError):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_comment_and_thread(
thread_overrides={"course_id": unicode(disabled_course.id)},
overrides={"course_id": unicode(disabled_course.id)}
)
with self.assertRaises(DiscussionDisabledError):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment
All privileged roles are able to delete a comment. A student can only
delete a comment if the student is the author and the comment is either
not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class RetrieveThreadTest(
ForumsEnableMixin,
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase
):
"""Tests for get_thread"""
@classmethod
def setUpClass(cls):
super(RetrieveThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(RetrieveThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for GET on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"title": "Test Title",
"body": "Test body",
"resp_total": 0,
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
def test_basic(self):
self.register_thread({"resp_total": 2})
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"response_count": 2,
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
get_thread(self.request, "missing_thread")
def test_nonauthor_enrolled_in_course(self):
non_author_user = UserFactory.create()
self.register_get_user_response(non_author_user)
CourseEnrollmentFactory.create(user=non_author_user, course_id=self.course.id)
self.register_thread()
self.request.user = non_author_user
self.assertEqual(get_thread(self.request, self.thread_id), self.expected_thread_data({
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"unread_comment_count": 1,
}))
self.assertEqual(httpretty.last_request().method, "GET")
def test_not_enrolled_in_course(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
get_thread(self.request, self.thread_id)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for retrieving a thread
All privileged roles are able to retrieve a thread. A student can only
retrieve a thread if the student is the author and the thread is either
not in a cohort or in the student's own cohort.
"""
cohort_course, cohort = _create_course_and_cohort_with_user_role(course_is_cohorted, self.user, role_name)
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
get_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
|
agpl-3.0
| 4,310,273,995,939,487,000
| 39.28949
| 120
| 0.568971
| false
| 3.948196
| true
| false
| false
|
ehabkost/virt-test
|
qemu/tests/usb_hotplug.py
|
1
|
1705
|
import logging, re, uuid
from autotest.client.shared import error
from autotest.client import utils
@error.context_aware
def run_usb_hotplug(test, params, env):
"""
Test usb hotplug
@param test: kvm test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
device = params.get("usb_type_testdev")
product = params.get("product")
# compose strings
monitor_add = "device_add %s" % device
monitor_add += ",bus=usbtest.0,id=usbplugdev"
monitor_del = "device_del usbplugdev"
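# The device is hot-plugged and unplugged through the QEMU monitor; the guest's
# dmesg output is then checked for the corresponding plug and unplug messages.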
error.context("Log into guest", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login()
session.cmd_status("dmesg -c")
error.context("Plugin usb device", logging.info)
reply = vm.monitor.cmd(monitor_add)
if reply.find("Parameter 'driver' expects a driver name") != -1:
raise error.TestNAError("usb device %s not available" % device)
session.cmd_status("sleep 1")
session.cmd_status("udevadm settle")
messages_add = session.cmd("dmesg -c")
for line in messages_add.splitlines():
logging.debug("[dmesg add] %s" % line)
if messages_add.find("Product: %s" % product) == -1:
raise error.TestFail("kernel didn't detect plugin")
error.context("Unplug usb device", logging.info)
vm.monitor.cmd(monitor_del)
session.cmd_status("sleep 1")
messages_del = session.cmd("dmesg -c")
for line in messages_del.splitlines():
logging.debug("[dmesg del] %s" % line)
if messages_del.find("USB disconnect") == -1:
raise error.TestFail("kernel didn't detect unplug")
session.close()
|
gpl-2.0
| 3,051,980,685,977,966,000
| 32.431373
| 71
| 0.659824
| false
| 3.552083
| true
| false
| false
|
shaneoc/atom
|
atom/router/directory.py
|
1
|
1333
|
class Directory(object):
def __init__(self, router):
self.router = router
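        # The router is expected to expose a `database` attribute whose
        # execute() method accepts SQL (e.g. a DB-API style connection);
        # start() uses it to create the users/modules/hostnames tables.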
def start(self):
db = self.router.database
db.execute(
'CREATE TABLE IF NOT EXISTS users (' +
'id INTEGER PRIMARY KEY, name TEXT, password TEXT)')
db.execute(
'INSERT OR REPLACE INTO users VALUES (0, ?, ?)', ('system', None))
db.execute(
'CREATE TABLE IF NOT EXISTS modules (' +
'id INTEGER PRIMARY KEY, name TEXT)')
db.execute(
'CREATE TABLE IF NOT EXISTS hostnames (' +
'id INTEGER PRIMARY KEY, hostname TEXT UNIQUE, module_id INTEGER)')
def get_users(self):
pass
def get_modules(self):
pass
def get_system_hostname(self):
return 'sys.xvc.cc:8080'
def get_shell_hostname(self, uid):
return 'home.xvc.cc:8080'
def check_login(self, username, password):
if username == 'shane' and password == 'test':
return 1
else:
return None
def check_authorization(self, uid, hostname):
return True
def get_socket(self, hostname, uri):
return False
class Module(object):
def get_endpoint(self, path):
pass
class User(object):
pass
|
mit
| 6,492,416,664,778,558,000
| 24.653846
| 79
| 0.543136
| false
| 4.34202
| false
| false
| false
|
bokeh/bokeh
|
examples/app/fourier_animated.py
|
1
|
6647
|
''' Show a streaming, updating representation of Fourier Series.
The example was inspired by `this video`_.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve fourier_animated.py
at your command prompt. Then navigate to the URL
http://localhost:5006/fourier_animated
in your browser.
.. _this video: https://www.youtube.com/watch?v=LznjC4Lo7lE
'''
from collections import OrderedDict
import numpy as np
from bokeh.driving import repeat
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
pi = np.pi
N = 100
newx = x = np.linspace(0, 2*pi, N)
shift = 2.2
base_x = x + shift
period = pi/2
palette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']
def new_source():
return dict(
curve=ColumnDataSource(dict(x=[], base_x=[], y=[])),
lines=ColumnDataSource(dict(line_x=[], line_y=[], radius_x=[], radius_y=[])),
circle_point=ColumnDataSource(dict(x=[], y=[], r=[])),
circleds=ColumnDataSource(dict(x=[], y=[]))
)
def create_circle_glyphs(p, color, sources):
p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])
p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])
p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])
def create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):
if y_range is None:
y_range=[-2, 2]
# create new figure
p = figure(title=title, width=800, height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
cx, cy = 0, 0
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)
cp = sources['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i==0:
# compute the full fourier eq
full_y = sum(foo(x) for foo in foos)
# replace the foo curve with the full fourier eq
sources['curve'] = ColumnDataSource(dict(x=x, base_x=base_x, y=full_y))
# draw the line
p.line('base_x','y', color="orange", line_width=2, source=sources['curve'])
if i==len(foos)-1:
# if it's the last foo let's draw a circle on the head of the curve
sources['floating_point'] = ColumnDataSource({'x':[shift], 'y': [cy]})
p.line('line_x', 'line_y', color=palette[i], line_width=2, source=sources['lines'])
p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])
# draw the circle, radius and circle point related to foo domain
create_circle_glyphs(p, palette[i], sources)
_sources.append(sources)
return p, _sources
def get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):
if compute_curve:
ys = foo(xs)
sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)
r = foo(period)
y = foo(xs[0]) + cy
x = cfoo(xs[0]) + cx
sources['lines'].data = {
'line_x': [x, shift], 'line_y': [y, y],
'radius_x': [0, x], 'radius_y': [0, y]
}
sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}
sources['circleds'].data=dict(
x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,
y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,
)
def update_sources(sources, foos, newx, ind, cfoos):
cx, cy = 0, 0
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,
compute_curve = i != 0)
if i == 0:
full_y = sum(foo(newx) for foo in foos)
sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)
cp = sources[i]['circle_point'].data
cx, cy = cp['x'][0], cp['y'][0]
if i == len(foos)-1:
sources[i]['floating_point'].data['x'] = [shift]
sources[i]['floating_point'].data['y'] = [cy]
def update_centric_sources(sources, foos, newx, ind, cfoos):
for i, foo in enumerate(foos):
get_new_sources(newx, foo, sources[i], cfoos[i])
def create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):
p = figure(title=title, width=800, height=300, x_range=[-2, 9], y_range=y_range)
p.xgrid.bounds = (-2, 2)
p.xaxis.bounds = (-2, 2)
_sources = []
for i, foo in enumerate(foos):
sources = new_source()
get_new_sources(x, foo, sources, cfoos[i])
_sources.append(sources)
if i:
legend_label = "4sin(%(c)sx)/%(c)spi" % {'c': i*2+1}
else:
legend_label = "4sin(x)/pi"
p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])
p.line('line_x', 'line_y', color=palette[i], line_width=2,
source=sources['lines'], legend_label=legend_label)
create_circle_glyphs(p, palette[i], sources)
p.legend.location = "top_right"
p.legend.orientation = "horizontal"
p.legend.padding = 6
p.legend.margin = 6
p.legend.spacing = 6
return p, _sources
# create the series partials
f1 = lambda x: (4*np.sin(x))/pi
f2 = lambda x: (4*np.sin(3*x))/(3*pi)
f3 = lambda x: (4*np.sin(5*x))/(5*pi)
f4 = lambda x: (4*np.sin(7*x))/(7*pi)
cf1 = lambda x: (4*np.cos(x))/pi
cf2 = lambda x: (4*np.cos(3*x))/(3*pi)
cf3 = lambda x: (4*np.cos(5*x))/(5*pi)
cf4 = lambda x: (4*np.cos(7*x))/(7*pi)
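# Together these are the first four terms of the Fourier series of a square
# wave, f(x) ~ (4/pi) * sum_{k=0..3} sin((2k+1)x)/(2k+1); the cosine versions
# (cf1..cf4) give the x-coordinate of each point on its harmonic circle.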
fourier = OrderedDict(
fourier_4 = {
'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),
'fs': [f1, f2, f3, f4],
'cfs': [cf1, cf2, cf3, cf4]
},
)
for k, p in fourier.items():
p['plot'], p['sources'] = create_plot(
p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']
)
for k, p in fourier.items():
p['cplot'], p['csources'] = create_centric_plot(
p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']
)
layout = column(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])
@repeat(range(N))
def cb(gind):
global newx
oldx = np.delete(newx, 0)
newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])
for k, p in fourier.items():
update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])
update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])
curdoc().add_periodic_callback(cb, 100)
curdoc().add_root(layout)
curdoc().title = "Fourier Animated"
|
bsd-3-clause
| 8,983,696,342,426,303,000
| 32.235
| 114
| 0.574846
| false
| 2.845462
| false
| false
| false
|
mposner/pychess
|
board.py
|
1
|
5063
|
# pychess
# mposner 11/23/14
from piece import Piece
from piece_attributes import PieceType, Color
from utils import isValidPosition
class Board:
"""Represents a chess board"""
def __init__(self):
"""Initialize a new chess board"""
self.makeNewPieces()
def __str__(self):
result = " " + 33*"=" + "\n"
for i in range(7,-1,-1):
result += " " + str(i+1) + " | "
for j in range(8):
if self.board[i][j] is None:
result += " | "
else:
result += self.board[i][j].shortstr() + " | "
result = result[:-1] + "\n"
if i > 0:
result += " " + 33*"-" + "\n"
else:
result += " " + 33*"=" + "\n"
result += " " + " ".join(["A","B","C","D","E","F","G","H"])
return result
def getPiece(self, position):
"""Return the piece at the given board square"""
if not isValidPosition(position):
return None
rank = int(position[1]) - 1
file = ord(position[0]) - ord("A")
return self.board[rank][file]
def isValidMove(self, piece, end):
"""See if a move is valid for a given Piece"""
if not isValidPosition(end): #rule out bad position input
return False
startfile = ord(piece.position[0]) #file is column, A-H
startrank = int(piece.position[1]) #rank is row, 1-8
endfile = ord(end[0])
endrank = int(end[1])
filediff = abs(startfile - endfile)
rankdiff = abs(startrank - endrank)
if piece.type == PieceType.KING:
if filediff <= 1 and rankdiff <= 1:
return True
else:
return False
elif piece.type == PieceType.QUEEN:
if filediff == 0 or rankdiff == 0:
return True
elif filediff == rankdiff:
return True
else:
return False
elif piece.type == PieceType.BISHOP:
if filediff == rankdiff:
return True
else:
return False
elif piece.type == PieceType.KNIGHT:
if filediff == 0 and rankdiff == 0:
return True
elif filediff == 1 and rankdiff == 2:
return True
elif filediff == 2 and rankdiff == 1:
return True
else:
return False
elif piece.type == PieceType.ROOK:
if filediff == 0 or rankdiff == 0:
return True
else:
return False
elif piece.type == PieceType.PAWN:
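            # Note: this branch assumes white's direction of travel (rank
            # increasing); black pawn moves are not handled separately.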
if filediff == 0 and (endrank-startrank) == 1:
# Normal move forward
return True
elif filediff == 1 and rankdiff == 1:
# Only valid if taking an enemy piece
if self.getPiece(end) is not None and \
self.getPiece(end).color != piece.color:
return True
elif filediff == 0 and (endrank-startrank) == 2:
# Only valid if pawn is starting from starting position
if int(piece.position[1]) == 2:
return True
return False
def makeNewPieces(self):
"""Make a new set of pieces"""
white = []
white.append(Piece(Color.WHITE, PieceType.ROOK, "A1"))
white.append(Piece(Color.WHITE, PieceType.KNIGHT, "B1"))
white.append(Piece(Color.WHITE, PieceType.BISHOP, "C1"))
white.append(Piece(Color.WHITE, PieceType.QUEEN, "D1"))
white.append(Piece(Color.WHITE, PieceType.KING, "E1"))
white.append(Piece(Color.WHITE, PieceType.BISHOP, "F1"))
white.append(Piece(Color.WHITE, PieceType.KNIGHT, "G1"))
white.append(Piece(Color.WHITE, PieceType.ROOK, "H1"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "A2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "B2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "C2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "D2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "E2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "F2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "G2"))
white.append(Piece(Color.WHITE, PieceType.PAWN, "H2"))
black = []
black.append(Piece(Color.BLACK, PieceType.ROOK, "A8"))
black.append(Piece(Color.BLACK, PieceType.KNIGHT, "B8"))
black.append(Piece(Color.BLACK, PieceType.BISHOP, "C8"))
black.append(Piece(Color.BLACK, PieceType.QUEEN, "D8"))
black.append(Piece(Color.BLACK, PieceType.KING, "E8"))
black.append(Piece(Color.BLACK, PieceType.BISHOP, "F8"))
black.append(Piece(Color.BLACK, PieceType.KNIGHT, "G8"))
black.append(Piece(Color.BLACK, PieceType.ROOK, "H8"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "A7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "B7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "C7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "D7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "E7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "F7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "G7"))
black.append(Piece(Color.BLACK, PieceType.PAWN, "H7"))
self.white = white
self.black = black
#2-D array representing the board (board[0] = rank 1)
board = [[] for i in range(8)]
board[0] = white[0:8]
board[1] = white[8:]
board[2] = [None for i in range(8)]
board[3] = [None for i in range(8)]
board[4] = [None for i in range(8)]
board[5] = [None for i in range(8)]
board[6] = black[8:]
board[7] = black[0:8]
self.board = board
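# --- Illustrative usage (not part of the original module) ---
# A minimal sketch assuming the sibling `piece` and `piece_attributes`
# modules imported above are available; it prints the starting position
# and checks two pawn moves using the public API defined in this class.
if __name__ == "__main__":
    b = Board()
    print(b)
    pawn = b.getPiece("E2")
    print(b.isValidMove(pawn, "E3"))   # True: single step forward
    print(b.isValidMove(pawn, "E5"))   # False: three ranks is too far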
|
gpl-2.0
| -1,243,340,808,460,693,000
| 26.818681
| 69
| 0.633419
| false
| 2.701708
| false
| false
| false
|
it-events-ro/scripts
|
update-from-eventbrite.py
|
1
|
6855
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import sys
import utils
included_organizers = {
2491303902, # http://www.eventbrite.com/o/itcamp-2491303902
6873285549, # http://www.eventbrite.com/o/sponge-media-lab-6873285549
3001324227, # http://www.eventbrite.com/o/labview-student-ambassador-upb-3001324227
2300226659, # http://www.eventbrite.com/o/techstars-startup-programs-2300226659
5899601137, # http://www.eventbrite.com/o/oana-calugar-amp-fabio-carati-amp-cristian-dascalu-5899601137
4662547959, # http://www.eventbrite.com/o/clujhub-4662547959
4138472935, # http://www.eventbrite.com/o/yonder-4138472935
6397991619, # http://www.eventbrite.com/o/facultatea-de-inginerie-electrica-in-colaborare-cu-best-cluj-napoca-6397991619
3367422098, # http://www.eventbrite.com/o/andreea-popescu-3367422098
4206997271, # http://www.eventbrite.com/o/babele-create-together-4206997271
3168795376, # http://www.eventbrite.com/o/girls-in-tech-romania-3168795376
6671021543, # http://www.eventbrite.com/o/asociatia-ip-workshop-6671021543
2761218168, # http://www.eventbrite.com/o/ccsir-2761218168
9377817403, # http://www.eventbrite.com/o/hellojs-9377817403
7802438407, # http://www.eventbrite.com/o/innodrive-7802438407
10949312400, # http://www.eventbrite.com/o/school-of-content-10949312400
6795968089, # http://www.eventbrite.com/o/iiba-romania-chapter-6795968089
10963965257, # http://www.eventbrite.com/o/sinaptiq-edu-10963965257
4246372985, # http://www.eventbrite.com/o/hackathon-in-a-box-4246372985
8767089022, # http://www.eventbrite.com.au/o/bm-college-8767089022
6886785391, # http://www.eventbrite.com/o/sprint-consulting-6886785391
8270334915, # http://www.eventbrite.co.uk/o/msg-systems-romania-8270334915
2670928534, # http://www.eventbrite.com/o/itcamp-community-2670928534
5340605367, # http://www.eventbrite.com/o/techhub-bucharest-5340605367
8042013777, # http://www.eventbrite.com/o/owasp-foundation-8042013777
11097508562, # http://www.eventbrite.com/o/robertino-vasilescu-si-bogdan-socol-ambasadori-prestashop-11097508562
}
excluded_organizers = {
8035595159, # http://www.eventbrite.com/o/magicianul-augustin-8035595159
8193977126, # http://www.eventbrite.com/o/growth-marketing-conference-8193977126
2725971154, # http://www.eventbrite.com/o/lost-worlds-racing-2725971154
7795480037, # http://www.eventbrite.de/o/dexcar-autovermietung-ug-7795480037
10911641537, # http://www.eventbrite.com/o/johanna-house-10911641537
10950100881, # http://www.eventbrite.com/o/peace-action-training-and-research-institute-of-romania-patrir-10950100881
8349138707, # http://www.eventbrite.com/o/trix-bike-primaria-tg-jiu-consilul-local-gorj-salvamont-gorj-8349138707
5715420837, # http://www.eventbrite.com/o/mattei-events-5715420837
2087207893, # http://www.eventbrite.com/o/john-stevens-zero-in-2087207893
11050568264, # http://www.eventbrite.com/o/cristian-vasilescu-11050568264
10924487836, # http://www.eventbrite.com/o/kmdefensecom-krav-maga-scoala-bukan-10924487836
10797347037, # http://www.eventbrite.co.uk/o/story-travels-ltd-10797347037
10933030217, # http://www.eventbrite.com/o/10933030217
5570020107, # http://www.eventbrite.com/o/marius-5570020107
10948788760, # http://www.eventbrite.com/o/centrul-de-dezvoltare-personala-constanta-10948788760
10796273575, # http://www.eventbrite.com/o/summer-foundation-10796273575
10931790600, # http://www.eventbrite.com/o/ioana-amp-vali-10931790600
10024410089, # http://www.eventbrite.com/o/leagea-atractiei-in-actiune-10024410089
6837788799, # http://www.eventbrite.com/o/lost-worlds-travel-6837788799
10868911506, # http://www.eventbrite.com/o/the-city-of-green-buildings-association-10868911506
10973196426, # http://www.eventbrite.com/o/10973196426
8428263732, # http://www.eventbrite.com/o/upwork-8428263732
10967928809, # http://www.eventbrite.com/o/eastern-artisans-atelier-10967928809
1863005385, # http://www.eventbrite.com/o/sigma-3-survival-school-1863005385
8541146418, # http://www.eventbrite.com/o/modularity-8541146418
10909622502, # http://www.eventbrite.com/o/different-angle-cluster-10909622502
8384351483, # http://www.eventbrite.com/o/sciencehub-8384351483
10894747098, # http://www.eventbrite.com/o/consact-consulting-10894747098
10952849991, # http://www.eventbrite.co.uk/o/art-live-10952849991
10902884665, # http://www.eventbrite.com/o/10902884665
10942128462, # http://www.eventbrite.com/o/eurotech-assessment-and-certification-services-pvt-ltd-10942128462
9631107106, # http://www.eventbrite.com/o/de-ce-nu-eu-9631107106
11054013211, # http://www.eventbrite.co.uk/o/first-people-solutions-aviation-11054013211
10867523860, # http://www.eventbrite.com/o/igloo-media-10867523860
11063098365, # http://www.eventbrite.co.uk/o/glas-expert-11063098365
8348933279, # http://www.eventbrite.com/o/parentis-8348933279
11087510059, # http://www.eventbrite.co.uk/o/untold-ong-11087510059
11085577626, # http://www.eventbrite.com/o/11085577626
}
# TODO: make somehow API calls return historical events also
# TODO: make API calls handle paging
print('Looking for new organizations')
has_unknown_orgs = False
events = utils.eventbriteApi('events/search/?venue.country=RO&include_unavailable_events=true')
for e in events['events']:
organizer_id = int(e['organizer_id'])
if (organizer_id in included_organizers) or (organizer_id in excluded_organizers):
continue
has_unknown_orgs = True
org = utils.eventbriteApi('organizers/%d/' % organizer_id)
print('Unknown organization %d:\n- %s\n- %s' % (organizer_id, e['url'], org['url']))
if has_unknown_orgs:
print('Had unknown orgs, stopping')
sys.exit(1)
orgs, venues, events = {}, {}, []
def _getOrganizersAndEvents(org_id):
global events, orgs
org = utils.eventbriteApi('organizers/%d/' % org_id)
orgs[org_id] = org
org_events = utils.eventbriteApi(
'organizers/%d/events/?start_date.range_start=2010-01-01T00:00:00&status=all' % org_id)
events += [e for e in org_events['events'] if 'venue_id' in e and e['venue_id'] is not None]
utils.repeat(included_organizers, 'Fetching organization data for %d', _getOrganizersAndEvents)
def _getVenueInfo(venue_id):
global venues
venue = utils.eventbriteApi('venues/%d/' % venue_id)
# some organizations do events world-wide, not in RO only
if venue['address']['country'] != 'RO': return
venues[venue_id] = venue
unique_venues = frozenset(int(e['venue_id']) for e in events)
utils.repeat(unique_venues, 'Fetching venue information for %d', _getVenueInfo)
# filter out events not from RO
events = [e for e in events if int(e['venue_id']) in venues]
result = dict(orgs=orgs, venues=venues, events=events)
with open('eventbrites.json', 'w') as f:
f.write(json.dumps(result, sort_keys=True, indent=4))
|
agpl-3.0
| -8,798,751,040,999,646,000
| 52.554688
| 122
| 0.756966
| false
| 2.474729
| false
| false
| false
|
HengeSense/website
|
website/migrations/0003_auto__chg_field_userprofile_gender__chg_field_userprofile_date_of_birt.py
|
1
|
4876
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.gender'
db.alter_column('website_userprofile', 'gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True))
# Changing field 'UserProfile.date_of_birth'
db.alter_column('website_userprofile', 'date_of_birth', self.gf('django.db.models.fields.DateField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'UserProfile.gender'
raise RuntimeError("Cannot reverse this migration. 'UserProfile.gender' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'UserProfile.date_of_birth'
raise RuntimeError("Cannot reverse this migration. 'UserProfile.date_of_birth' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.userprofile': {
'Meta': {'ordering': "('_order',)", 'object_name': 'UserProfile'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'receive_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['website']
|
agpl-3.0
| 3,340,166,466,635,874,000
| 64.905405
| 182
| 0.576292
| false
| 3.815336
| false
| false
| false
|
Atrasoftware/PyTMCL
|
PyTMCL/tests/test_codec.py
|
1
|
7568
|
#!/usr/bin/env python
import unittest
from TMCL import *
import random as rnd
MAXITER = 200
REQUEST_KEYS = codec.REQUEST_KEYS + ['value']
REPLY_KEYS = codec.REPLY_KEYS + ['value']
class CodecTestCase(unittest.TestCase):
def _gen_byte(self, min_i=0, max_i=256):
return rnd.randint(min_i, max_i-1)
def _gen_bytes(self, length=5):
return [self._gen_byte() for _ in range(length)]
def _gen_pos_bytes(self, length=5):
return [self._gen_byte(max_i=128)] + self._gen_bytes(length-1)
def _gen_neg_bytes(self, length=5):
return [self._gen_byte(min_i=128)] + self._gen_bytes(length-1)
def _gen_number(self, length=None):
if length is None:
length = rnd.randint(1, 9)
value = [rnd.randint(0, 9) for _ in range(length)]
value = (str(s) for s in value)
value = "".join(value)
return int(value)
def _gen_cmd_string(self, length=8):
values = [rnd.randint(0, 9) for _ in range(length)]
chksum = sum(values)
values.append(chksum)
string = "".join(chr(v) for v in values)
return string
def test_byte(self):
for i in range(MAXITER):
self.assertIn(codec.byte(i), range(256))
def test_checksum(self):
for i in range(MAXITER):
self.assertEqual(codec.checksum(i*[i]), codec.byte(i*i))
def test_encodeBytes(self):
value = 123456789
bytes = codec.encodeBytes(value)
self.assertEqual([7, 91, 205, 21], bytes)
new_value = codec.decodeBytes(bytes)
self.assertEqual(value, new_value)
def test_decodeBytes(self):
bytes = [1, 2, 3, 4]
value = codec.decodeBytes(bytes)
self.assertEqual(16909060, value)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_encdecBytes(self):
for _ in range(MAXITER):
value = self._gen_number()
bytes = codec.encodeBytes(value)
new_value = codec.decodeBytes(bytes)
self.assertEqual(value, new_value)
def test_decencBytes(self):
for _ in range(MAXITER):
bytes = self._gen_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_decencNegBytes(self):
for _ in range(MAXITER):
bytes = self._gen_neg_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def test_decencPosBytes(self):
for _ in range(MAXITER):
bytes = self._gen_pos_bytes(length=4)
value = codec.decodeBytes(bytes)
new_bytes = codec.encodeBytes(value)
self.assertEqual(bytes, new_bytes)
def _help_test_encodeReAllCommand(self, encoder, decoder, keys):
string = "ABCD\x00\x00\x00EO"
values = [ord(s) for s in string]
result = encoder(values[0], values[1], values[2], values[3], sum(values[4:8]))
self.assertEqual(string, result)
def _help_test_decodeReAllCommand(self, encoder, decoder, keys):
string = "ABCD\x00\x00\x00EO"
result = decoder(string)
for i, k in enumerate(keys[:4]):
self.assertEqual(ord(string[i]), result[k])
values = sum(ord(s) for s in string[4:8])
self.assertEqual(values, result['value'])
self.assertEqual(ord(string[7]), result['value'])
self.assertEqual(ord(string[8]), result['checksum'])
def test_encodeRequestCommand(self):
self._help_test_encodeReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_decodeRequestCommand(self):
self._help_test_decodeReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_encodeReplyCommand(self):
self._help_test_encodeReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_decodeReplyCommand(self):
self._help_test_decodeReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def _help_test_encdecReAllCommand(self, encoder, decoder, keys):
for _ in range(MAXITER):
values = self._gen_bytes(length=len(keys))
string = encoder(*values)
result = decoder(string)
for i, k in enumerate(keys):
self.assertEqual(values[i], result[k])
self.assertEqual(sum(values) % 256, result['checksum'])
def _help_test_decencReALLCommand(self, encoder, decoder, keys):
for _ in range(MAXITER):
string = self._gen_cmd_string()
values = decoder(string)
unpacked = (values[k] for k in keys)
new_string = encoder(*unpacked)
self.assertEqual(string, new_string)
def test_encdecRequestCommand(self):
self._help_test_encdecReAllCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_decencRequestCommand(self):
self._help_test_decencReALLCommand(codec.encodeRequestCommand, codec.decodeRequestCommand, REQUEST_KEYS)
def test_encdecReplyCommand(self):
self._help_test_encdecReAllCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_decencReplyCommand(self):
self._help_test_decencReALLCommand(codec.encodeReplyCommand, codec.decodeReplyCommand, REPLY_KEYS)
def test_encodeCommand(self):
string = "ABCD\x00\x00\x00EO"
params = [ord(s) for s in string[:4]]
values = ord(string[7])
# values = sum(ord(s) for s in string[4:8])
new_string = codec.encodeCommand(params, values)
self.assertEqual(string, new_string)
def test_decodeCommand(self):
keys = range(4)
string = "ABCD\x00\x00\x00EO"
result = codec.decodeCommand(string, keys)
for i, k in enumerate(keys):
self.assertEqual(ord(string[i]), result[k])
values = sum(ord(s) for s in string[4:8])
self.assertEqual(values, result['value'])
self.assertEqual(ord(string[7]), result['value'])
self.assertEqual(ord(string[8]), result['checksum'])
def test_encdecCommand(self):
keys = range(4)
for _ in range(MAXITER):
params = self._gen_bytes(length=4)
values = self._gen_byte()
chksum = sum(params, values) % 256
string = codec.encodeCommand(params, values)
result = codec.decodeCommand(string, keys)
for i, k in enumerate(keys):
self.assertEqual(params[i], result[k])
self.assertEqual(values, result['value'])
self.assertEqual(chksum, result['checksum'])
def test_decencCommand(self):
keys = range(4)
for _ in range(MAXITER):
string = self._gen_cmd_string()
decoded = codec.decodeCommand(string, keys)
params = [decoded[k] for k in keys]
values = decoded['value']
new_string = codec.encodeCommand(params, values)
self.assertEqual(string[:4], new_string[:4]) # parameter part
self.assertEqual(string[4:8], new_string[4:8]) # value part
self.assertEqual(string[8], new_string[8]) # checksum part
self.assertEqual(string, new_string)
if __name__ == '__main__':
unittest.main()
|
lgpl-3.0
| -1,073,172,985,004,600,400
| 29.516129
| 112
| 0.612711
| false
| 3.664891
| true
| false
| false
|
liesbethvanherpe/NeuroM
|
neurom/fst/__init__.py
|
1
|
6830
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' NeuroM, lightweight and fast
Examples:
Obtain some morphometrics
>>> ap_seg_len = fst.get('segment_lengths', nrn, neurite_type=neurom.APICAL_DENDRITE)
>>> ax_sec_len = fst.get('section_lengths', nrn, neurite_type=neurom.AXON)
'''
import numpy as _np
from . import _neuritefunc as _nrt
from . import _neuronfunc as _nrn
from ._core import FstNeuron
from ..core import NeuriteType as _ntype
from ..core import iter_neurites as _ineurites
from ..core.types import tree_type_checker as _is_type
from ..exceptions import NeuroMError
NEURITEFEATURES = {
'total_length': _nrt.total_length,
'total_length_per_neurite': _nrt.total_length_per_neurite,
'neurite_lengths': _nrt.total_length_per_neurite,
'terminal_path_lengths_per_neurite': _nrt.terminal_path_lengths_per_neurite,
'section_lengths': _nrt.section_lengths,
'section_term_lengths': _nrt.section_term_lengths,
'section_bif_lengths': _nrt.section_bif_lengths,
'neurite_volumes': _nrt.total_volume_per_neurite,
'neurite_volume_density': _nrt.neurite_volume_density,
'section_volumes': _nrt.section_volumes,
'section_areas': _nrt.section_areas,
'section_tortuosity': _nrt.section_tortuosity,
'section_path_distances': _nrt.section_path_lengths,
'number_of_sections': _nrt.number_of_sections,
'number_of_sections_per_neurite': _nrt.number_of_sections_per_neurite,
'number_of_neurites': _nrt.number_of_neurites,
'number_of_bifurcations': _nrt.number_of_bifurcations,
'number_of_forking_points': _nrt.number_of_forking_points,
'number_of_terminations': _nrt.number_of_terminations,
'section_branch_orders': _nrt.section_branch_orders,
'section_term_branch_orders': _nrt.section_term_branch_orders,
'section_bif_branch_orders': _nrt.section_bif_branch_orders,
'section_radial_distances': _nrt.section_radial_distances,
'local_bifurcation_angles': _nrt.local_bifurcation_angles,
'remote_bifurcation_angles': _nrt.remote_bifurcation_angles,
'partition': _nrt.bifurcation_partitions,
'partition_asymmetry': _nrt.partition_asymmetries,
'number_of_segments': _nrt.number_of_segments,
'segment_lengths': _nrt.segment_lengths,
'segment_volumes': _nrt.segment_volumes,
'segment_radii': _nrt.segment_radii,
'segment_midpoints': _nrt.segment_midpoints,
'segment_taper_rates': _nrt.segment_taper_rates,
'segment_radial_distances': _nrt.segment_radial_distances,
'segment_meander_angles': _nrt.segment_meander_angles,
'principal_direction_extents': _nrt.principal_direction_extents,
'total_area_per_neurite': _nrt.total_area_per_neurite,
}
NEURONFEATURES = {
'soma_radii': _nrn.soma_radii,
'soma_surface_areas': _nrn.soma_surface_areas,
'trunk_origin_radii': _nrn.trunk_origin_radii,
'trunk_origin_azimuths': _nrn.trunk_origin_azimuths,
'trunk_origin_elevations': _nrn.trunk_origin_elevations,
'trunk_section_lengths': _nrn.trunk_section_lengths,
'sholl_frequency': _nrn.sholl_frequency,
}
def register_neurite_feature(name, func):
'''Register a feature to be applied to neurites
Parameters:
name: name of the feature, used for access via get() function.
func: single parameter function of a neurite.
'''
if name in NEURITEFEATURES:
raise NeuroMError('Attempt to hide registered feature %s', name)
def _fun(neurites, neurite_type=_ntype.all):
'''Wrap neurite function from outer scope and map into list'''
return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type)))
NEURONFEATURES[name] = _fun
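# A hedged usage sketch for register_neurite_feature (illustrative only): the
# feature name 'max_point_radius' is invented here, and it assumes each neurite
# exposes a `points` array whose fourth column holds radii.
#
#   >>> register_neurite_feature('max_point_radius',
#   ...                          lambda neurite: neurite.points[:, 3].max())
#   >>> get('max_point_radius', nrn)  # nrn: a previously loaded neuron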
def get(feature, obj, **kwargs):
'''Obtain a feature from a set of morphology objects
Parameters:
feature(string): feature to extract
obj: a neuron, population or neurite tree
**kwargs: parameters to forward to underlying worker functions
Returns:
features as a 1D or 2D numpy array.
'''
feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES
else NEURONFEATURES[feature])
return _np.array(list(feature(obj, **kwargs)))
_INDENT = ' ' * 4
def _indent(string, count):
'''indent `string` by `count` * INDENT'''
indent = _INDENT * count
ret = indent + string.replace('\n', '\n' + indent)
return ret.rstrip()
def _get_doc():
'''Get a description of all the known available features'''
def get_docstring(func):
        '''extract docstring, if possible'''
docstring = ':\n'
if func.__doc__:
docstring += _indent(func.__doc__, 2)
return docstring
ret = ['\nNeurite features (neurite, neuron, neuron population):']
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURITEFEATURES.items()))
ret.append('\nNeuron features (neuron, neuron population):')
ret.extend(_INDENT + '- ' + feature + get_docstring(func)
for feature, func in sorted(NEURONFEATURES.items()))
return '\n'.join(ret)
get.__doc__ += _indent('\nFeatures:\n', 1) + _indent(_get_doc(), 2) # pylint: disable=no-member
|
bsd-3-clause
| 317,015,164,251,236,800
| 39.898204
| 96
| 0.701611
| false
| 3.381188
| false
| false
| false
|
denys-duchier/kivy
|
kivy/uix/filechooser.py
|
1
|
32541
|
'''
FileChooser
===========
.. versionadded:: 1.0.5
.. versionchanged:: 1.2.0
In the chooser template, the `controller` is not a direct reference anymore
but a weak-reference.
You must update all the notation `root.controller.xxx` to
`root.controller().xxx`.
Simple example
--------------
main.py
.. include:: ../../examples/RST_Editor/main.py
:literal:
editor.kv
.. highlight:: kv
.. include:: ../../examples/RST_Editor/editor.kv
:literal:
'''
__all__ = ('FileChooserListView', 'FileChooserIconView',
'FileChooserListLayout', 'FileChooserIconLayout',
'FileChooser', 'FileChooserController',
'FileChooserProgressBase', 'FileSystemAbstract',
'FileSystemLocal')
from weakref import ref
from time import time
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.utils import platform as core_platform
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import (
StringProperty, ListProperty, BooleanProperty, ObjectProperty,
NumericProperty, OptionProperty, AliasProperty)
from os import listdir
from os.path import (
basename, join, sep, normpath, expanduser, altsep,
splitdrive, realpath, getsize, isdir, abspath, pardir)
from fnmatch import fnmatch
import collections
platform = core_platform
filesize_units = ('B', 'KB', 'MB', 'GB', 'TB')
_have_win32file = False
if platform == 'win':
# Import that module here as it's not available on non-windows machines.
# See http://bit.ly/i9klJE except that the attributes are defined in
# win32file not win32com (bug on page).
    # Note: For some reason this doesn't work after an os.chdir(), no matter
    # which directory you change to or from. Windows weirdness.
try:
from win32file import FILE_ATTRIBUTE_HIDDEN, GetFileAttributesExW, error
_have_win32file = True
except ImportError:
Logger.error('filechooser: win32file module is missing')
Logger.error('filechooser: we cant check if a file is hidden or not')
def alphanumeric_folders_first(files, filesystem):
return (sorted(f for f in files if filesystem.is_dir(f)) +
sorted(f for f in files if not filesystem.is_dir(f)))
class FileSystemAbstract(object):
'''Class for implementing a File System view that can be used with the
:class:`FileChooser`.:attr:`~FileChooser.file_system`.
.. versionadded:: 1.8.0
'''
def listdir(self, fn):
'''Return the list of files in the directory `fn`
'''
pass
def getsize(self, fn):
'''Return the size in bytes of a file
'''
pass
def is_hidden(self, fn):
'''Return True if the file is hidden
'''
pass
def is_dir(self, fn):
'''Return True if the argument passed to this method is a directory
'''
pass
class FileSystemLocal(FileSystemAbstract):
'''Implementation of :class:`FileSystemAbstract` for local files
.. versionadded:: 1.8.0
'''
def listdir(self, fn):
return listdir(fn)
def getsize(self, fn):
return getsize(fn)
def is_hidden(self, fn):
if platform == 'win':
if not _have_win32file:
return False
try:
return GetFileAttributesExW(fn)[0] & FILE_ATTRIBUTE_HIDDEN
except error:
                # This error can occur when the file is already being accessed
                # by someone else, so treat it as hidden: chances are we would
                # not be able to do anything useful with it anyway.
Logger.exception('unable to access to <%s>' % fn)
return True
return basename(fn).startswith('.')
def is_dir(self, fn):
return isdir(fn)
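# A hedged sketch (not part of Kivy) of customising the file system used by a
# FileChooser: subclass FileSystemLocal so editor backup files ending in '~'
# are also treated as hidden, then pass an instance via the `file_system`
# property documented further down.
#
#   class LocalSkipBackups(FileSystemLocal):
#       def is_hidden(self, fn):
#           return fn.endswith('~') or FileSystemLocal.is_hidden(self, fn)
#
#   # e.g. FileChooserListView(file_system=LocalSkipBackups())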
class FileChooserProgressBase(FloatLayout):
'''Base for implementing a progress view. This view is used when too many
entries need to be created and are delayed over multiple frames.
.. versionadded:: 1.2.0
'''
path = StringProperty('')
'''Current path of the FileChooser, read-only.
'''
index = NumericProperty(0)
'''Current index of :attr:`total` entries to be loaded.
'''
total = NumericProperty(1)
'''Total number of entries to load.
'''
def cancel(self, *largs):
'''Cancel any action from the FileChooserController.
'''
if self.parent:
self.parent.cancel()
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_down(touch)
return True
def on_touch_move(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_move(touch)
return True
def on_touch_up(self, touch):
if self.collide_point(*touch.pos):
super(FileChooserProgressBase, self).on_touch_up(touch)
return True
class FileChooserProgress(FileChooserProgressBase):
pass
class FileChooserLayout(FloatLayout):
'''Base class for file chooser layouts.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'undefined'
__events__ = ('on_entry_added', 'on_entries_cleared',
'on_subentry_to_entry', 'on_remove_subentry', 'on_submit')
controller = ObjectProperty()
'''
Reference to the controller handling this layout.
:class:`~kivy.properties.ObjectProperty`
'''
def on_entry_added(self, node, parent=None):
pass
def on_entries_cleared(self):
pass
def on_subentry_to_entry(self, subentry, entry):
pass
def on_remove_subentry(self, subentry, entry):
pass
def on_submit(self, selected, touch=None):
pass
class FileChooserListLayout(FileChooserLayout):
'''File chooser layout using a list view.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'list'
_ENTRY_TEMPLATE = 'FileListEntry'
def __init__(self, **kwargs):
super(FileChooserListLayout, self).__init__(**kwargs)
self.fast_bind('on_entries_cleared', self.scroll_to_top)
def scroll_to_top(self, *args):
self.ids.scrollview.scroll_y = 1.0
class FileChooserIconLayout(FileChooserLayout):
'''File chooser layout using an icon view.
.. versionadded:: 1.9.0
'''
VIEWNAME = 'icon'
_ENTRY_TEMPLATE = 'FileIconEntry'
def __init__(self, **kwargs):
super(FileChooserIconLayout, self).__init__(**kwargs)
self.fast_bind('on_entries_cleared', self.scroll_to_top)
def scroll_to_top(self, *args):
self.ids.scrollview.scroll_y = 1.0
class FileChooserController(RelativeLayout):
'''Base for implementing a FileChooser. Don't use this class directly, but
prefer using an implementation such as the :class:`FileChooser`,
:class:`FileChooserListView` or :class:`FileChooserIconView`.
.. versionchanged:: 1.9.0
:Events:
`on_entry_added`: entry, parent
Fired when a root-level entry is added to the file list.
`on_entries_cleared`
            Fired when the entries list is cleared, usually when the
root is refreshed.
`on_subentry_to_entry`: entry, parent
Fired when a sub-entry is added to an existing entry.
        `on_remove_subentry`: subentry, entry
            Fired when entries are removed from an entry, usually when
            a node is closed.
`on_submit`: selection, touch
Fired when a file has been selected with a double-tap.
'''
_ENTRY_TEMPLATE = None
layout = ObjectProperty(baseclass=FileChooserLayout)
'''
Reference to the layout widget instance.
layout is an :class:`~kivy.properties.ObjectProperty`.
.. versionadded:: 1.9.0
'''
path = StringProperty(u'/')
'''
:class:`~kivy.properties.StringProperty`, defaults to the current working
directory as a unicode string. It specifies the path on the filesystem that
this controller should refer to.
.. warning::
If a unicode path is specified, all the files returned will be in
unicode allowing the display of unicode files and paths. If a bytes
path is specified, only files and paths with ascii names will be
displayed properly: non-ascii filenames will be displayed and listed
        with question marks (?) instead of their unicode characters.
'''
filters = ListProperty([])
''':class:`~kivy.properties.ListProperty`, defaults to [], equal to '\*'.
Specifies the filters to be applied to the files in the directory.
The filters are not reset when the path changes. You need to do that
yourself if desired.
There are two kinds of filters: patterns and callbacks.
#. Patterns
e.g. ['\*.png'].
You can use the following patterns:
========== =================================
Pattern Meaning
========== =================================
\* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any character not in seq
========== =================================
#. Callbacks
You can specify a function that will be called for each file. The
callback will be passed the folder and file name as the first
and second parameters respectively. It should return True to
indicate a match and False otherwise.
.. versionchanged:: 1.4.0
If the filter is a callable (function or method), it will be called
with the path and the file name as arguments for each file in the
directory.
        The callable should return True to indicate a match and False
        otherwise.
'''
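    # A hedged usage sketch of both filter kinds (values are examples only;
    # `chooser` stands for any FileChooser instance):
    #
    #   chooser.filters = ['*.png', '*.jpg']
    #   chooser.filters = [lambda folder, filename: not filename.endswith('.tmp')]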
filter_dirs = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Indicates whether filters should also apply to directories.
'''
sort_func = ObjectProperty(alphanumeric_folders_first)
'''
:class:`~kivy.properties.ObjectProperty`.
Provides a function to be called with a list of filenames, and the
filesystem implementation as the second argument.
Returns a list of filenames sorted for display in the view.
.. versionchanged:: 1.8.0
        The signature now needs 2 arguments: first the list of files,
second the filesystem class to use.
'''
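    # A hedged sketch of a custom sort function (illustrative only): largest
    # files first, keeping the required (files, filesystem) signature.
    #
    #   def biggest_first(files, filesystem):
    #       return sorted(files, key=filesystem.getsize, reverse=True)
    #
    #   chooser.sort_func = biggest_first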
files = ListProperty([])
'''
Read-only :class:`~kivy.properties.ListProperty`.
The list of files in the directory specified by path after applying the
filters.
'''
show_hidden = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether hidden files and folders should be shown.
'''
selection = ListProperty([])
'''
Read-only :class:`~kivy.properties.ListProperty`.
Contains the list of files that are currently selected.
'''
multiselect = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether the user is able to select multiple files or not.
'''
dirselect = BooleanProperty(False)
'''
:class:`~kivy.properties.BooleanProperty`, defaults to False.
Determines whether directories are valid selections or not.
.. versionadded:: 1.1.0
'''
rootpath = StringProperty(None, allownone=True)
'''
Root path to use instead of the system root path. If set, it will not show
a ".." directory to go up to the root path. For example, if you set
rootpath to /users/foo, the user will be unable to go to /users or to any
other directory not starting with /users/foo.
.. versionadded:: 1.2.0
:class:`~kivy.properties.StringProperty`, defaults to None.
.. note::
Similar to :attr:`path`, if `rootpath` is specified, whether it's a
bytes or unicode string determines the type of the filenames and paths
read.
'''
progress_cls = ObjectProperty(FileChooserProgress)
'''Class to use for displaying a progress indicator for filechooser
loading.
.. versionadded:: 1.2.0
:class:`~kivy.properties.ObjectProperty`, defaults to
:class:`FileChooserProgress`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
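    # A hedged sketch of a custom progress view (names invented): subclass
    # FileChooserProgressBase, register it with the Factory, then refer to it
    # by name as described in the versionchanged note above.
    #
    #   class QuietProgress(FileChooserProgressBase):
    #       pass
    #   Factory.register('QuietProgress', cls=QuietProgress)
    #   chooser.progress_cls = 'QuietProgress'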
file_encodings = ListProperty(['utf-8', 'latin1', 'cp1252'])
'''Possible encodings for decoding a filename to unicode. In the case that
    the user has a weird filename, undecodable without knowing its
    initial encoding, we have no choice but to guess it.
Please note that if you encounter an issue because of a missing encoding
here, we'll be glad to add it to this list.
.. versionadded:: 1.3.0
.. deprecated:: 1.8.0
This property is no longer used as the filechooser no longer decodes
the file names.
file_encodings is a :class:`~kivy.properties.ListProperty` and defaults to
['utf-8', 'latin1', 'cp1252'],
'''
file_system = ObjectProperty(FileSystemLocal(),
baseclass=FileSystemAbstract)
'''Implementation to access the file system. Must be an instance of
FileSystemAbstract.
.. versionadded:: 1.8.0
:class:`~kivy.properties.ObjectProperty`, defaults to
:class:`FileSystemLocal()`
'''
__events__ = ('on_entry_added', 'on_entries_cleared',
'on_subentry_to_entry', 'on_remove_subentry', 'on_submit')
def __init__(self, **kwargs):
self._progress = None
super(FileChooserController, self).__init__(**kwargs)
self._items = []
fbind = self.fast_bind
fbind('selection', self._update_item_selection)
self._previous_path = [self.path]
fbind('path', self._save_previous_path)
update = self._trigger_update
fbind('path', update)
fbind('filters', update)
fbind('rootpath', update)
update()
def on_touch_down(self, touch):
        # don't respond to touches outside self
if not self.collide_point(*touch.pos):
return
if self.disabled:
return True
return super(FileChooserController, self).on_touch_down(touch)
def on_touch_up(self, touch):
        # don't respond to touches outside self
if not self.collide_point(*touch.pos):
return
if self.disabled:
return True
return super(FileChooserController, self).on_touch_up(touch)
def _update_item_selection(self, *args):
for item in self._items:
item.selected = item.path in self.selection
def _save_previous_path(self, instance, value):
self._previous_path.append(value)
self._previous_path = self._previous_path[-2:]
def _trigger_update(self, *args):
Clock.unschedule(self._update_files)
Clock.schedule_once(self._update_files)
def on_entry_added(self, node, parent=None):
if self.layout:
self.layout.dispatch('on_entry_added', node, parent)
def on_entries_cleared(self):
if self.layout:
self.layout.dispatch('on_entries_cleared')
def on_subentry_to_entry(self, subentry, entry):
if self.layout:
self.layout.dispatch('on_subentry_to_entry', subentry, entry)
def on_remove_subentry(self, subentry, entry):
if self.layout:
self.layout.dispatch('on_remove_subentry', subentry, entry)
def on_submit(self, selected, touch=None):
if self.layout:
self.layout.dispatch('on_submit', selected, touch)
def entry_touched(self, entry, touch):
'''(internal) This method must be called by the template when an entry
is touched by the user.
'''
if (
'button' in touch.profile and touch.button in (
'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
return False
_dir = self.file_system.is_dir(entry.path)
dirselect = self.dirselect
if _dir and dirselect and touch.is_double_tap:
self.open_entry(entry)
return
if self.multiselect:
if entry.path in self.selection:
self.selection.remove(entry.path)
else:
if _dir and not self.dirselect:
self.open_entry(entry)
return
self.selection.append(entry.path)
else:
if _dir and not self.dirselect:
                self.open_entry(entry)
return
self.selection = [entry.path, ]
def entry_released(self, entry, touch):
'''(internal) This method must be called by the template when an entry
        is released by the user.
.. versionadded:: 1.1.0
'''
if (
'button' in touch.profile and touch.button in (
'scrollup', 'scrolldown', 'scrollleft', 'scrollright')):
return False
if not self.multiselect:
if self.file_system.is_dir(entry.path) and not self.dirselect:
self.open_entry(entry)
elif touch.is_double_tap:
if self.dirselect and self.file_system.is_dir(entry.path):
self.open_entry(entry)
else:
self.dispatch('on_submit', self.selection, touch)
def open_entry(self, entry):
try:
# Just check if we can list the directory. This is also what
# _add_file does, so if it fails here, it would also fail later
# on. Do the check here to prevent setting path to an invalid
# directory that we cannot list.
self.file_system.listdir(entry.path)
except OSError:
entry.locked = True
else:
# If entry.path is to jump to previous directory, update path with
# parent directory
self.path = abspath(join(self.path, entry.path))
self.selection = []
def _apply_filters(self, files):
if not self.filters:
return files
filtered = []
for filt in self.filters:
if isinstance(filt, collections.Callable):
filtered.extend([fn for fn in files if filt(self.path, fn)])
else:
filtered.extend([fn for fn in files if fnmatch(fn, filt)])
if not self.filter_dirs:
dirs = [fn for fn in files if self.file_system.is_dir(fn)]
filtered.extend(dirs)
return list(set(filtered))
def get_nice_size(self, fn):
'''Pass the filepath. Returns the size in the best human readable
format or '' if it is a directory (Don't recursively calculate size.).
'''
if self.file_system.is_dir(fn):
return ''
try:
size = self.file_system.getsize(fn)
except OSError:
return '--'
for unit in filesize_units:
if size < 1024.0:
return '%1.0f %s' % (size, unit)
size /= 1024.0
def _update_files(self, *args, **kwargs):
# trigger to start gathering the files in the new directory
        # we'll start a timer that will do the job, 10 times per frame
# (default)
self._gitems = []
self._gitems_parent = kwargs.get('parent', None)
self._gitems_gen = self._generate_file_entries(
path=kwargs.get('path', self.path),
parent=self._gitems_parent)
# cancel any previous clock if exist
Clock.unschedule(self._create_files_entries)
# show the progression screen
self._hide_progress()
if self._create_files_entries():
            # not enough time to create all the entries, add a clock to continue
# start a timer for the next 100 ms
Clock.schedule_interval(self._create_files_entries, .1)
def _get_file_paths(self, items):
return [file.path for file in items]
def _create_files_entries(self, *args):
        # create as many entries as possible within 50 ms, with a minimum of 10
        # (on a "fast system" (core i7 2700K) we can create up to 40 entries
        # in 50 ms, so 10 is a reasonable floor for slow systems).
start = time()
finished = False
index = total = count = 1
while time() - start < 0.05 or count < 10:
try:
index, total, item = next(self._gitems_gen)
self._gitems.append(item)
count += 1
except StopIteration:
finished = True
break
except TypeError: # in case _gitems_gen is None
finished = True
break
# if this wasn't enough for creating all the entries, show a progress
# bar, and report the activity to the user.
if not finished:
self._show_progress()
self._progress.total = total
self._progress.index = index
return True
# we created all the files, now push them on the view
self._items = items = self._gitems
parent = self._gitems_parent
if parent is None:
self.dispatch('on_entries_cleared')
for entry in items:
self.dispatch('on_entry_added', entry, parent)
else:
parent.entries[:] = items
for entry in items:
self.dispatch('on_subentry_to_entry', entry, parent)
self.files[:] = self._get_file_paths(items)
# stop the progression / creation
self._hide_progress()
self._gitems = None
self._gitems_gen = None
Clock.unschedule(self._create_files_entries)
return False
def cancel(self, *largs):
'''Cancel any background action started by filechooser, such as loading
a new directory.
.. versionadded:: 1.2.0
'''
Clock.unschedule(self._create_files_entries)
self._hide_progress()
if len(self._previous_path) > 1:
            # if we cancel any action, the path will be set back to the
# previous one, so we can safely cancel the update of the previous
# path.
self.path = self._previous_path[-2]
Clock.unschedule(self._update_files)
def _show_progress(self):
if self._progress:
return
cls = self.progress_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
self._progress = cls(path=self.path)
self._progress.value = 0
self.add_widget(self._progress)
def _hide_progress(self):
if self._progress:
self.remove_widget(self._progress)
self._progress = None
def _generate_file_entries(self, *args, **kwargs):
        # Generator that will create all the file entries.
        # The generator is used via _update_files() and _create_files_entries();
        # don't use it directly.
is_root = False
path = kwargs.get('path', self.path)
have_parent = kwargs.get('parent', None) is not None
# Add the components that are always needed
if self.rootpath:
rootpath = realpath(self.rootpath)
path = realpath(path)
if not path.startswith(rootpath):
self.path = rootpath
return
elif path == rootpath:
is_root = True
else:
if platform == 'win':
is_root = splitdrive(path)[1] in (sep, altsep)
elif platform in ('macosx', 'linux', 'android', 'ios'):
is_root = normpath(expanduser(path)) == sep
else:
# Unknown fs, just always add the .. entry but also log
Logger.warning('Filechooser: Unsupported OS: %r' % platform)
        # generate an entry to go back to the previous directory
if not is_root and not have_parent:
back = '..' + sep
pardir = self._create_entry_widget(dict(
name=back, size='', path=back, controller=ref(self),
isdir=True, parent=None, sep=sep, get_nice_size=lambda: ''))
yield 0, 1, pardir
# generate all the entries for files
try:
for index, total, item in self._add_files(path):
yield index, total, item
except OSError:
Logger.exception('Unable to open directory <%s>' % self.path)
self.files[:] = []
def _create_entry_widget(self, ctx):
template = self.layout._ENTRY_TEMPLATE\
if self.layout else self._ENTRY_TEMPLATE
return Builder.template(template, **ctx)
def _add_files(self, path, parent=None):
path = expanduser(path)
files = []
fappend = files.append
for f in self.file_system.listdir(path):
try:
# In the following, use fully qualified filenames
fappend(normpath(join(path, f)))
except UnicodeDecodeError:
Logger.exception('unable to decode <{}>'.format(f))
except UnicodeEncodeError:
Logger.exception('unable to encode <{}>'.format(f))
# Apply filename filters
files = self._apply_filters(files)
# Sort the list of files
files = self.sort_func(files, self.file_system)
is_hidden = self.file_system.is_hidden
if not self.show_hidden:
files = [x for x in files if not is_hidden(x)]
self.files[:] = files
total = len(files)
wself = ref(self)
for index, fn in enumerate(files):
def get_nice_size():
# Use a closure for lazy-loading here
return self.get_nice_size(fn)
ctx = {'name': basename(fn),
'get_nice_size': get_nice_size,
'path': fn,
'controller': wself,
'isdir': self.file_system.is_dir(fn),
'parent': parent,
'sep': sep}
entry = self._create_entry_widget(ctx)
yield index, total, entry
def entry_subselect(self, entry):
if not self.file_system.is_dir(entry.path):
return
self._update_files(path=entry.path, parent=entry)
def close_subselection(self, entry):
for subentry in entry.entries:
self.dispatch('on_remove_subentry', subentry, entry)
class FileChooserListView(FileChooserController):
'''Implementation of :class:`FileChooserController` using a list view.
.. versionadded:: 1.9.0
'''
_ENTRY_TEMPLATE = 'FileListEntry'
class FileChooserIconView(FileChooserController):
'''Implementation of :class:`FileChooserController` using an icon view.
.. versionadded:: 1.9.0
'''
_ENTRY_TEMPLATE = 'FileIconEntry'
class FileChooser(FileChooserController):
'''Implementation of :class:`FileChooserController` which supports
switching between multiple, synced layout views.
.. versionadded:: 1.9.0
'''
manager = ObjectProperty()
'''
Reference to the :class:`~kivy.uix.screenmanager.ScreenManager` instance.
:class:`~kivy.properties.ObjectProperty`
'''
_view_list = ListProperty()
def get_view_list(self):
return self._view_list
view_list = AliasProperty(get_view_list, bind=('_view_list',))
'''
List of views added to this FileChooser.
:class:`~kivy.properties.AliasProperty` of type :class:`list`.
'''
_view_mode = StringProperty()
def get_view_mode(self):
return self._view_mode
def set_view_mode(self, mode):
if mode not in self._view_list:
raise ValueError('unknown view mode %r' % mode)
self._view_mode = mode
view_mode = AliasProperty(
get_view_mode, set_view_mode, bind=('_view_mode',))
'''
Current layout view mode.
:class:`~kivy.properties.AliasProperty` of type :class:`str`.
'''
@property
def _views(self):
return [screen.children[0] for screen in self.manager.screens]
def __init__(self, **kwargs):
super(FileChooser, self).__init__(**kwargs)
self.manager = ScreenManager()
super(FileChooser, self).add_widget(self.manager)
self.trigger_update_view = Clock.create_trigger(self.update_view)
self.fast_bind('view_mode', self.trigger_update_view)
def add_widget(self, widget, **kwargs):
if widget is self._progress:
super(FileChooser, self).add_widget(widget, **kwargs)
elif hasattr(widget, 'VIEWNAME'):
name = widget.VIEWNAME + 'view'
screen = Screen(name=name)
widget.controller = self
screen.add_widget(widget)
self.manager.add_widget(screen)
self.trigger_update_view()
else:
raise ValueError(
'widget must be a FileChooserLayout,'
' not %s' % type(widget).__name__)
def rebuild_views(self):
views = [view.VIEWNAME for view in self._views]
if views != self._view_list:
self._view_list = views
if self._view_mode not in self._view_list:
self._view_mode = self._view_list[0]
self._trigger_update()
def update_view(self, *args):
self.rebuild_views()
sm = self.manager
viewlist = self._view_list
view = self.view_mode
current = sm.current[:-4]
viewindex = viewlist.index(view) if view in viewlist else 0
currentindex = viewlist.index(current) if current in viewlist else 0
direction = 'left' if currentindex < viewindex else 'right'
sm.transition.direction = direction
sm.current = view + 'view'
def _create_entry_widget(self, ctx):
return [Builder.template(view._ENTRY_TEMPLATE, **ctx)
for view in self._views]
def _get_file_paths(self, items):
if self._views:
return [file[0].path for file in items]
return []
def _update_item_selection(self, *args):
for viewitem in self._items:
selected = viewitem[0].path in self.selection
for item in viewitem:
item.selected = selected
def on_entry_added(self, node, parent=None):
for index, view in enumerate(self._views):
view.dispatch(
'on_entry_added',
node[index], parent[index] if parent else None)
def on_entries_cleared(self):
for view in self._views:
view.dispatch('on_entries_cleared')
def on_subentry_to_entry(self, subentry, entry):
for index, view in enumerate(self._views):
view.dispatch('on_subentry_to_entry', subentry[index], entry)
def on_remove_subentry(self, subentry, entry):
for index, view in enumerate(self._views):
view.dispatch('on_remove_subentry', subentry[index], entry)
def on_submit(self, selected, touch=None):
view_mode = self.view_mode
for view in self._views:
if view_mode == view.VIEWNAME:
view.dispatch('on_submit', selected, touch)
return
if __name__ == '__main__':
from kivy.app import App
from pprint import pprint
import textwrap
import sys
root = Builder.load_string(textwrap.dedent('''\
BoxLayout:
orientation: 'vertical'
BoxLayout:
size_hint_y: None
height: sp(52)
Button:
text: 'Icon View'
on_press: fc.view_mode = 'icon'
Button:
text: 'List View'
on_press: fc.view_mode = 'list'
FileChooser:
id: fc
FileChooserIconLayout
FileChooserListLayout
'''))
class FileChooserApp(App):
def build(self):
v = root.ids.fc
if len(sys.argv) > 1:
v.path = sys.argv[1]
v.bind(selection=lambda *x: pprint("selection: %s" % x[1:]))
v.bind(path=lambda *x: pprint("path: %s" % x[1:]))
return root
FileChooserApp().run()
|
mit
| -4,341,889,288,043,636,700
| 31.476048
| 80
| 0.598414
| false
| 4.19505
| false
| false
| false
|
patjouk/djangogirls
|
jobs/migrations/0004_auto_20150712_1803.py
|
1
|
3951
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20150510_1707'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='reviewers_comment',
),
migrations.RemoveField(
model_name='meetup',
name='reviewers_comment',
),
migrations.AddField(
model_name='job',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='job',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AddField(
model_name='meetup',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='meetup',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AlterField(
model_name='job',
name='expiration_date',
field=models.DateField(null=True, blank=True, help_text='Automatically is set 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='job',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='job',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your offer or company website.'),
),
migrations.AlterField(
model_name='meetup',
name='expiration_date',
field=models.DateField(null=True, blank=True, help_text='Automatically is set 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_end_date',
field=models.DateTimeField(null=True, blank=True, help_text='Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_start_date',
field=models.DateTimeField(null=True, help_text='If this is a recurring meetup/event, please enter a start date. Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_type',
field=models.CharField(choices=[('MEET', 'meetup'), ('CONF', 'conference'), ('WORK', 'workshop')], max_length=4, default='MEET'),
),
migrations.AlterField(
model_name='meetup',
name='recurrence',
field=models.CharField(null=True, blank=True, max_length=255, help_text='Provide details of recurrence if applicable.'),
),
migrations.AlterField(
model_name='meetup',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='meetup',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your meetup or organisation website.'),
),
]
|
bsd-3-clause
| 2,459,570,391,821,128,700
| 41.945652
| 188
| 0.576563
| false
| 4.203191
| false
| false
| false
|
cleberzavadniak/canivett
|
canivett/tree.py
|
1
|
6306
|
import os
import errno
import logging
import imp
from fuse import Operations, FuseOSError
class Tree(Operations):
"""
Most of this class is based on the work of Stavros Korokithakis:
https://www.stavros.io/posts/python-fuse-filesystem/
;-)
"""
def __init__(self, base):
self.logger = logging.getLogger(self.__class__.__name__)
self.base = base
self.modules = {}
self.virtual_tree = {}
self.verbose = True
self.create_root()
self.load_config()
def destroy(self, path):
for module_instance in self.virtual_tree.values():
module_instance.destroy()
# Helpers:
def create_root(self):
root_path = os.path.join(self.base, 'root')
if not os.path.exists(root_path):
os.mkdir(root_path)
def load_config(self):
config_path = os.path.join(self.base, 'canivett.cfg')
with open(config_path) as cfg_file:
for line in cfg_file:
content, *comments = line.split('#') # NOQA
if not content:
continue
path, module_name = content.strip().split('=')
self.mkdirs(path)
module = self.get_module(module_name)
self.virtual_tree[path] = module.Module(self, path)
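    # A hypothetical canivett.cfg matching the "<mount path>=<module name>"
    # lines parsed above; the module names are invented for illustration and
    # each must be importable as a "canivett_<name>" package:
    #
    #   # text after '#' is ignored
    #   /music=mpd
    #   /notes/todo=todotxt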
def mkdirs(self, path):
parts = path.split('/')
current = '/'
for part in parts:
current = os.path.join(current, part)
complete = self.get_real_path(current)
if not os.path.exists(complete):
os.mkdir(complete)
def _do_load_module(self, name):
real_name = 'canivett_' + name
_, path, *_ = imp.find_module(real_name) # NOQA
module = imp.load_package(real_name, path)
self.modules[name] = module
return module
def get_module(self, name):
try:
return self.modules[name]
except KeyError:
return self._do_load_module(name)
def get_real_path(self, path):
real = os.path.join(self.base, 'root', path.strip('/'))
return real
def __call__(self, op, path, *args):
verbose = self.verbose and not (op in ('statfs', 'getattr') and path == '/')
for managed_path, module in self.virtual_tree.items():
if path.startswith(managed_path):
obj = module.root
break
else:
obj = self
f = getattr(obj, op)
try:
result = f(path, *args)
except Exception as ex:
if verbose:
self.logger.info('{}({}) -> {}'.format(op, path, ex))
raise ex
else:
if verbose:
self.logger.info('{}({}) -> {}'.format(op, path, result))
return result
def raise_error(self, error):
raise FuseOSError(error)
# Init
# ==================
def init(self, path):
return 0
# Filesystem methods
# ==================
def access(self, path, mode):
full_path = self.get_real_path(path)
if not os.access(full_path, mode):
raise FuseOSError(errno.EACCES)
def chmod(self, path, mode):
full_path = self.get_real_path(path)
return os.chmod(full_path, mode)
def chown(self, path, uid, gid):
full_path = self.get_real_path(path)
return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
full_path = self.get_real_path(path)
st = os.lstat(full_path)
keys = ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime',
'st_nlink', 'st_size', 'st_uid')
return dict((key, getattr(st, key)) for key in keys)
def readdir(self, path, fh):
full_path = self.get_real_path(path)
dirents = ['.', '..']
if os.path.isdir(full_path):
dirents.extend(os.listdir(full_path))
for r in dirents:
yield r
def readlink(self, path):
pathname = os.readlink(self.get_real_path(path))
if pathname.startswith("/"):
# Path name is absolute, sanitize it.
return os.path.relpath(pathname, self.base)
else:
return pathname
def mknod(self, path, mode, dev):
return os.mknod(self.get_real_path(path), mode, dev)
def rmdir(self, path):
full_path = self.get_real_path(path)
return os.rmdir(full_path)
def mkdir(self, path, mode):
return os.mkdir(self.get_real_path(path), mode)
def statfs(self, path):
full_path = self.get_real_path(path)
stv = os.statvfs(full_path)
keys = ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')
return dict((key, getattr(stv, key)) for key in keys)
def unlink(self, path):
return os.unlink(self.get_real_path(path))
def symlink(self, name, target):
return os.symlink(name, self.get_real_path(target))
def rename(self, old, new):
return os.rename(self.get_real_path(old), self.get_real_path(new))
def link(self, target, name):
return os.link(self.get_real_path(target), self.get_real_path(name))
def utimens(self, path, times=None):
return os.utime(self.get_real_path(path), times)
# File methods
# ============
def open(self, path, flags):
full_path = self.get_real_path(path)
return os.open(full_path, flags)
def create(self, path, mode, fi=None):
full_path = self.get_real_path(path)
return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)
def read(self, path, length, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.read(fh, length)
def write(self, path, buf, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.write(fh, buf)
def truncate(self, path, length, fh=None):
full_path = self.get_real_path(path)
with open(full_path, 'r+') as f:
f.truncate(length)
def flush(self, path, fh):
return os.fsync(fh)
def release(self, path, fh):
return os.close(fh)
def fsync(self, path, fdatasync, fh):
return self.flush(path, fh)
|
gpl-2.0
| -1,390,266,037,243,918,800
| 28.605634
| 84
| 0.553283
| false
| 3.518973
| false
| false
| false
|
MariusWirtz/TM1py
|
TM1py/Services/TM1Service.py
|
1
|
1569
|
import pickle
from TM1py.Services import *
class TM1Service:
""" All features of TM1py are exposed through this service
Can be saved and restored from File, to avoid multiple authentication with TM1.
"""
def __init__(self, **kwargs):
self._tm1_rest = RESTService(**kwargs)
# instantiate all Services
self.chores = ChoreService(self._tm1_rest)
self.cubes = CubeService(self._tm1_rest)
self.dimensions = DimensionService(self._tm1_rest)
self.monitoring = MonitoringService(self._tm1_rest)
self.processes = ProcessService(self._tm1_rest)
self.security = SecurityService(self._tm1_rest)
self.server = ServerService(self._tm1_rest)
self.applications = ApplicationService(self._tm1_rest)
# Deprecated, use cubes.cells instead!
self.data = CellService(self._tm1_rest)
def logout(self):
self._tm1_rest.logout()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.logout()
@property
def whoami(self):
return self.security.get_current_user()
@property
def version(self):
return self._tm1_rest.version
@property
def connection(self):
return self._tm1_rest
def save_to_file(self, file_name):
with open(file_name, 'wb') as file:
pickle.dump(self, file)
@classmethod
def restore_from_file(cls, file_name):
with open(file_name, 'rb') as file:
return pickle.load(file)
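# A hedged usage sketch; the keyword arguments are passed straight through to
# RESTService, so the parameter names (address, port, user, password, ssl) are
# assumptions about that service and the values below are placeholders:
#
#   tm1 = TM1Service(address='localhost', port=8001, user='admin',
#                    password='apple', ssl=True)
#   print(tm1.whoami, tm1.version)
#   tm1.save_to_file('tm1_session.pkl')       # keep the authenticated session
#   tm1 = TM1Service.restore_from_file('tm1_session.pkl')
#   tm1.logout()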
|
mit
| -6,446,374,943,573,261,000
| 27.527273
| 83
| 0.631612
| false
| 3.76259
| false
| false
| false
|
sridevikoushik31/nova
|
nova/virt/baremetal/ipmi.py
|
1
|
10006
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
default=None,
help='path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
help='path to directory stores pidfiles of baremetal_terminal'),
cfg.IntOpt('ipmi_power_retry',
default=5,
help='maximal number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
f.write(password)
return path
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power state
of physical hardware via IPMI calls. It also provides serial console access
where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
        if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
        if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
        if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
        if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
locals())
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
def _power_on(self):
"""Turn the power to this node ON."""
def _wait_for_power_on():
"""Called at an interval until the node's power is on."""
if self.is_power_on():
self.state = baremetal_states.ACTIVE
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power on")
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=0.5).wait()
def _power_off(self):
"""Turn the power to this node OFF."""
def _wait_for_power_off():
"""Called at an interval until the node's power is off."""
if self.is_power_on() is False:
self.state = baremetal_states.DELETED
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power off")
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=0.5).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON.
Sets node next-boot to PXE and turns the power on,
waiting up to ipmi_power_retry/2 seconds for confirmation
that the power is on.
:returns: One of baremetal_states.py, representing the new state.
"""
if self.is_power_on() and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node.
Turns the power off, sets next-boot to PXE, and turns the power on.
Each action waits up to ipmi_power_retry/2 seconds for confirmation
that the power state has changed.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF.
Turns the power off, and waits up to ipmi_power_retry/2 seconds
for confirmation that the power is off.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
return self.state
def is_power_on(self):
"""Check if the power is currently on.
:returns: True if on; False if off; None if unable to determine.
"""
# NOTE(deva): string matching based on
# http://ipmitool.cvs.sourceforge.net/
# viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
res = self._exec_ipmitool("power status")[0]
if res == ("Chassis Power is on\n"):
return True
elif res == ("Chassis Power is off\n"):
return False
return None
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
        # Create the password file before the try block so that the finally
        # clause can always unlink it, even if building the command fails.
        pwfile = _make_password_file(self.password)
        try:
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
# Run shellinaboxd without pipes. Otherwise utils.execute() waits
# infinitely since shellinaboxd does not close passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
x.append('</dev/null')
x.append('>/dev/null')
x.append('2>&1')
utils.execute(' '.join(x), shell=True)
finally:
bm_utils.unlink_without_raise(pwfile)
def stop_console(self):
console_pid = _get_console_pid(self.node_id)
if console_pid:
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', '-TERM', str(console_pid),
run_as_root=True,
check_exit_code=[0, 99])
bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
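# --- Illustrative usage (added note; not part of the original module) ---------
# A minimal sketch of how a caller might drive this power manager. The dict
# keys mirror the ones read in IPMI.__init__ above; the concrete values are
# invented for illustration only.
#
#   node = {'id': 1,
#           'pm_address': '192.0.2.10',
#           'pm_user': 'admin',
#           'pm_password': 'secret',
#           'terminal_port': 9000}
#   pm = IPMI(node)
#   state = pm.activate_node()       # sets PXE next-boot, then powers on
#   if state == baremetal_states.ACTIVE:
#       pm.start_console()           # optional serial-over-LAN console
#   pm.deactivate_node()             # powers the node back off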
|
apache-2.0
| 7,198,640,514,716,515,000
| 33.38488
| 79
| 0.573056
| false
| 3.978529
| false
| false
| false
|
Arthraim/douban2mongo
|
book2mongo.py
|
1
|
2157
|
# coding= utf-8
from bs4 import BeautifulSoup
import codecs
from mongoengine import *
from book import Book
connect('mydouban')
import os
os.chdir("book")
for filename in os.listdir("."):
with codecs.open(filename, "r", "utf-8") as html_file:
soup = BeautifulSoup(html_file.read())
for item in soup.find_all("li", "subject-item"):
            # <a href="http://book.douban.com/subject/5992037/" onclick="moreurl(this,{i:'14'})" title="为他准备的谋杀">为他准备的谋杀</a>
a_tag = item.find_all("a")[1]
link = a_tag.get('href').encode('UTF-8')
title = a_tag.get('title').encode('UTF-8')
# <div class="pub">蒋峰 / 中信出版社 / 2011-4 / 29.00元</div>
pub = item.find("div", "pub").string.strip().encode('UTF-8')
# <div class="short-note">
# <div>
# <span class="rating4-t"></span>
# <span class="date">2013-12-27 读过</span>
# <span class="tags">标签: 马伯庸 小说 历史 中国 祥瑞御免</span>
# </div>
# <p class="comment">blabla</p>
# </div>
short_note = item.find("div", "short-note")
spans = short_note.div.find_all("span")
rating = spans[0]['class'][0].replace("rating","").replace("-t","")
date = spans[1].string.encode("UTF-8").replace("读过","").strip()
            tags = []  # reset per book so an untagged entry doesn't inherit the previous book's tags
            if len(spans) > 2:
                tags = spans[2].string.encode("UTF-8").replace("标签:","").strip().split(" ")
comment = short_note.p.string.encode("UTF-8").strip()
print ""
print title, pub, link
print rating, date, tags
print comment
book = Book()
book.title = title
book.pub = pub
book.link = link
book.rating = rating
book.date = date
book.tags = tags
book.comment = comment
try:
book.save()
except NotUniqueError as e:
print e
continue
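# --- Illustrative follow-up (added note; not part of the original script) -----
# After the scrape, the saved documents can be read back with the ordinary
# mongoengine query API. Only the field names assigned above are assumed; the
# rest of the Book schema lives in book.py and is not shown here.
#
#   for book in Book.objects.order_by('-date'):
#       print book.title, book.rating, book.date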
|
mit
| 4,743,349,884,267,529,000
| 35.982143
| 136
| 0.494447
| false
| 3.251177
| false
| false
| false
|
dahlia/wikidata
|
wikidata/quantity.py
|
1
|
1837
|
""":mod:`wikidata.quantity` --- Quantity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.7.0
"""
from typing import Optional
from .entity import Entity
__all__ = 'Quantity',
class Quantity:
"""A Quantity value represents a decimal number, together with information
about the uncertainty interval of this number, and a unit of measurement.
"""
amount = None # type: float
lower_bound = None # type: Optional[float]
upper_bound = None # type: Optional[float]
unit = None # type: Optional[Entity]
def __init__(self,
amount: float,
lower_bound: Optional[float],
upper_bound: Optional[float],
unit: Optional[Entity]) -> None:
self.amount = amount
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.unit = unit
def __eq__(self, other) -> bool:
if not isinstance(other, type(self)):
raise TypeError(
'expected an instance of {0.__module__}.{0.__qualname__}, '
'not {1!r}'.format(type(self), other)
)
return (other.amount == self.amount and
other.lower_bound == self.lower_bound and
other.upper_bound == self.upper_bound and
other.unit == self.unit)
def __hash__(self):
return hash((self.amount,
self.lower_bound,
self.upper_bound,
self.unit))
def __repr__(self) -> str:
return ('{0.__module__}.{0.__qualname__}({1!r}, '
'{2!r}, {3!r}, {4!r})').format(
type(self),
self.amount,
self.lower_bound,
self.upper_bound,
self.unit
)
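# --- Illustrative usage (added note; not part of the original module) ---------
# A small sketch of constructing and comparing quantities; the numbers are
# arbitrary and the unit is left as None instead of a real wikidata Entity.
#
#   q1 = Quantity(610.0, 609.5, 610.5, None)
#   q2 = Quantity(610.0, 609.5, 610.5, None)
#   assert q1 == q2                   # equality compares all four fields
#   assert hash(q1) == hash(q2)       # hashable, so usable as dict keys / in sets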
|
gpl-3.0
| 4,169,294,263,064,760,000
| 30.135593
| 78
| 0.494284
| false
| 4.262181
| false
| false
| false
|
rouge8/hitsearch
|
threadtest/maker.py
|
1
|
1288
|
from time import sleep
import time
import threading
class BoxFiller(threading.Thread):
def __init__(self,parent):
threading.Thread.__init__(self)
self.parent = parent
def run(self):
count = 0
for i in range(30):
sleep(.5)
count += 1
self.parent._box_lock.acquire()
self.parent._box.append(count)
self.parent._box_lock.release()
class Maker:
def __init__(self):
self._box = []
        self._boring = list(range(10))  # wrap in list() so .pop(0) works on Python 3 as well
self._box_lock = threading.Lock()
self.filler = BoxFiller(self)
def go(self):
self.filler.start()
@property
def box(self):
while True:
if len(self._box) == 0 and not self.filler.is_alive():
                return  # end the generator; raising StopIteration here breaks under PEP 479 (Python 3.7+)
if len(self._box) == 0:
sleep(.05)
continue
self._box_lock.acquire()
tmp = self._box.pop(0)
self._box_lock.release()
yield tmp
@property
def boring(self):
        while len(self._boring) != 0:
            #self._box_lock.acquire()
            tmp = self._boring.pop(0)
            #self._box_lock.release()
            yield tmp
        # The generator simply finishes here; an explicit "raise StopIteration"
        # would turn into a RuntimeError on Python 3.7+ (PEP 479).
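# --- Illustrative usage (added note; not part of the original module) ---------
# `box` blocks until the background BoxFiller thread produces items, while
# `boring` yields a precomputed sequence. Timing depends on the 0.5 s sleep
# in BoxFiller.run.
#
#   m = Maker()
#   m.go()                        # start the filler thread
#   for item in m.box:            # consumes items as they are produced
#       print(item)
#   for item in Maker().boring:   # plain sequential 0..9
#       print(item)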
|
mit
| 5,738,367,370,088,499,000
| 24.76
| 66
| 0.505435
| false
| 4.012461
| false
| false
| false
|
eigoshimizu/Genomon
|
scripts/genomon_pipeline/dna_pipeline.py
|
1
|
57868
|
import os
import shutil
import glob
from ruffus import *
from genomon_pipeline.config.run_conf import *
from genomon_pipeline.config.genomon_conf import *
from genomon_pipeline.config.sample_conf import *
from genomon_pipeline.dna_resource.bamtofastq import *
from genomon_pipeline.dna_resource.fastq_splitter import *
from genomon_pipeline.dna_resource.bwa_align import *
from genomon_pipeline.dna_resource.markduplicates import *
from genomon_pipeline.dna_resource.mutation_call import *
from genomon_pipeline.dna_resource.mutation_merge import *
from genomon_pipeline.dna_resource.sv_parse import *
from genomon_pipeline.dna_resource.sv_merge import *
from genomon_pipeline.dna_resource.sv_filt import *
from genomon_pipeline.dna_resource.qc_bamstats import *
from genomon_pipeline.dna_resource.qc_coverage import *
from genomon_pipeline.dna_resource.qc_merge import *
from genomon_pipeline.dna_resource.post_analysis import *
from genomon_pipeline.dna_resource.pre_pmsignature import *
from genomon_pipeline.dna_resource.pmsignature import *
from genomon_pipeline.dna_resource.paplot import *
# set task classes
bamtofastq = Bam2Fastq(genomon_conf.get("bam2fastq", "qsub_option"), run_conf.drmaa)
fastq_splitter = Fastq_splitter(genomon_conf.get("split_fastq", "qsub_option"), run_conf.drmaa)
bwa_align = Bwa_align(genomon_conf.get("bwa_mem", "qsub_option"), run_conf.drmaa)
markduplicates = Markduplicates(genomon_conf.get("markduplicates", "qsub_option"), run_conf.drmaa)
mutation_call = Mutation_call(genomon_conf.get("mutation_call", "qsub_option"), run_conf.drmaa)
mutation_merge = Mutation_merge(genomon_conf.get("mutation_merge", "qsub_option"), run_conf.drmaa)
sv_parse = SV_parse(genomon_conf.get("sv_parse", "qsub_option"), run_conf.drmaa)
sv_merge = SV_merge(genomon_conf.get("sv_merge", "qsub_option"), run_conf.drmaa)
sv_filt = SV_filt(genomon_conf.get("sv_filt", "qsub_option"), run_conf.drmaa)
r_qc_bamstats = Res_QC_Bamstats(genomon_conf.get("qc_bamstats", "qsub_option"), run_conf.drmaa)
r_qc_coverage = Res_QC_Coverage(genomon_conf.get("qc_coverage", "qsub_option"), run_conf.drmaa)
r_qc_merge = Res_QC_Merge(genomon_conf.get("qc_merge", "qsub_option"), run_conf.drmaa)
r_paplot = Res_PA_Plot(genomon_conf.get("paplot", "qsub_option"), run_conf.drmaa)
r_post_analysis = Res_PostAnalysis(genomon_conf.get("post_analysis", "qsub_option"), run_conf.drmaa)
r_pre_pmsignature = Res_PrePmsignature(genomon_conf.get("pre_pmsignature", "qsub_option"), run_conf.drmaa)
r_pmsignature_ind = Res_Pmsignature(genomon_conf.get("pmsignature_ind", "qsub_option"), run_conf.drmaa)
r_pmsignature_full = Res_Pmsignature(genomon_conf.get("pmsignature_full", "qsub_option"), run_conf.drmaa)
_debug = False
if genomon_conf.has_section("develop"):
if genomon_conf.has_option("develop", "debug") == True:
_debug = genomon_conf.getboolean("develop", "debug")
# generate output list of 'linked fastq'
linked_fastq_list = []
for sample in sample_conf.fastq:
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/1.sorted.bam'): continue
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/' + sample + '.markdup.bam'): continue
link_fastq_arr1 = []
link_fastq_arr2 = []
for (count, fastq_file) in enumerate(sample_conf.fastq[sample][0]):
fastq_prefix, ext = os.path.splitext(fastq_file)
link_fastq_arr1.append(run_conf.project_root + '/fastq/' + sample + '/' + str(count+1) + '_1' + ext)
link_fastq_arr2.append(run_conf.project_root + '/fastq/' + sample + '/' + str(count+1) + '_2' + ext)
linked_fastq_list.append([link_fastq_arr1,link_fastq_arr2])
# generate output list of 'bam2fastq'
bam2fastq_output_list = []
for sample in sample_conf.bam_tofastq:
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/1.sorted.bam'): continue
if os.path.exists(run_conf.project_root + '/bam/' + sample + '/' + sample + '.markdup.bam'): continue
bam2fastq_arr1 = []
bam2fastq_arr2 = []
bam2fastq_arr1.append(run_conf.project_root + '/fastq/' + sample + '/1_1.fastq')
bam2fastq_arr2.append(run_conf.project_root + '/fastq/' + sample + '/1_2.fastq')
bam2fastq_output_list.append([bam2fastq_arr1,bam2fastq_arr2])
# generate input list of 'mutation call'
markdup_bam_list = []
merge_mutation_list = []
for complist in sample_conf.mutation_call:
if os.path.exists(run_conf.project_root + '/mutation/' + complist[0] + '/' + complist[0] + '.genomon_mutation.result.filt.txt'): continue
tumor_bam = run_conf.project_root + '/bam/' + complist[0] + '/' + complist[0] + '.markdup.bam'
normal_bam = run_conf.project_root + '/bam/' + complist[1] + '/' + complist[1] + '.markdup.bam' if complist[1] != None else None
panel = run_conf.project_root + '/mutation/control_panel/' + complist[2] + ".control_panel.txt" if complist[2] != None else None
markdup_bam_list.append([tumor_bam, normal_bam, panel])
# generate input list of 'SV parse'
parse_sv_bam_list = []
all_target_bams = []
unique_bams = []
for complist in sample_conf.sv_detection:
tumor_sample = complist[0]
if tumor_sample != None:
all_target_bams.append(run_conf.project_root + '/bam/' + tumor_sample + '/' + tumor_sample + '.markdup.bam')
normal_sample = complist[1]
if normal_sample != None:
all_target_bams.append(run_conf.project_root + '/bam/' + normal_sample + '/' + normal_sample + '.markdup.bam')
panel_name = complist[2]
if panel_name != None:
for panel_sample in sample_conf.control_panel[panel_name]:
all_target_bams.append(run_conf.project_root + '/bam/' + panel_sample + '/' + panel_sample + '.markdup.bam')
unique_bams = list(set(all_target_bams))
for bam in unique_bams:
dir_name = os.path.dirname(bam)
sample_name = os.path.basename(dir_name)
if os.path.exists(run_conf.project_root + '/sv/' + sample_name + '/' + sample_name + '.junction.clustered.bedpe.gz') and os.path.exists(run_conf.project_root + '/sv/' + sample_name + '/' + sample_name + '.junction.clustered.bedpe.gz.tbi'): continue
parse_sv_bam_list.append(bam)
# generate input list of 'SV merge'
unique_complist = []
merge_bedpe_list = []
for complist in sample_conf.sv_detection:
control_panel_name = complist[2]
if control_panel_name != None and control_panel_name not in unique_complist:
unique_complist.append(control_panel_name)
for control_panel_name in unique_complist:
if os.path.exists(run_conf.project_root + '/sv/non_matched_control_panel/' + control_panel_name + '.merged.junction.control.bedpe.gz') and os.path.exists(run_conf.project_root + '/sv/non_matched_control_panel/' + control_panel_name + '.merged.junction.control.bedpe.gz.tbi'): continue
tmp_list = []
tmp_list.append(run_conf.project_root + '/sv/control_panel/' + control_panel_name + ".control_info.txt")
for sample in sample_conf.control_panel[control_panel_name]:
tmp_list.append(run_conf.project_root+ "/sv/"+ sample +"/"+ sample +".junction.clustered.bedpe.gz")
merge_bedpe_list.append(tmp_list)
# generate input list of 'SV filt'
filt_bedpe_list = []
for complist in sample_conf.sv_detection:
if os.path.exists(run_conf.project_root + '/sv/' + complist[0] +'/'+ complist[0] +'.genomonSV.result.filt.txt'): continue
filt_bedpe_list.append(run_conf.project_root+ "/sv/"+ complist[0] +"/"+ complist[0] +".junction.clustered.bedpe.gz")
# generate input list of 'qc'
qc_bamstats_list = []
qc_coverage_list = []
qc_merge_list = []
for sample in sample_conf.qc:
if os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.genomonQC.result.txt'): continue
qc_merge_list.append(
[run_conf.project_root + '/qc/' + sample + '/' + sample + '.bamstats',
run_conf.project_root + '/qc/' + sample + '/' + sample + '.coverage'])
if not os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.bamstats'):
qc_bamstats_list.append(run_conf.project_root + '/bam/' + sample +'/'+ sample +'.markdup.bam')
if not os.path.exists(run_conf.project_root + '/qc/' + sample + '/' + sample + '.coverage'):
qc_coverage_list.append(run_conf.project_root + '/bam/' + sample +'/'+ sample +'.markdup.bam')
###
# input/output lists for post-analysis
###
genomon_conf_name, genomon_conf_ext = os.path.splitext(os.path.basename(run_conf.genomon_conf_file))
sample_conf_name, sample_conf_ext = os.path.splitext(os.path.basename(run_conf.sample_conf_file))
# generate input list of 'post analysis for mutation'
pa_outputs_mutation = r_post_analysis.output_files("mutation", sample_conf.mutation_call, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_mutation = []
if pa_outputs_mutation["run_pa"] == True:
for complist in sample_conf.mutation_call:
pa_inputs_mutation.append(run_conf.project_root + '/mutation/' + complist[0] +'/'+ complist[0] +'.genomon_mutation.result.filt.txt')
# generate input list of 'post analysis for SV'
pa_outputs_sv = r_post_analysis.output_files("sv", sample_conf.sv_detection, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_sv = []
if pa_outputs_sv["run_pa"] == True:
for complist in sample_conf.sv_detection:
pa_inputs_sv.append(run_conf.project_root + '/sv/' + complist[0] +'/'+ complist[0] +'.genomonSV.result.filt.txt')
# generate input list of 'post analysis for qc'
pa_outputs_qc = r_post_analysis.output_files("qc", sample_conf.qc, run_conf.project_root, sample_conf_name, genomon_conf)
pa_inputs_qc = []
if pa_outputs_qc["run_pa"] == True:
for sample in sample_conf.qc:
pa_inputs_qc.append(run_conf.project_root + '/qc/' + sample + '/' + sample + '.genomonQC.result.txt')
###
# input/output lists for paplot
###
paplot_output = run_conf.project_root + '/paplot/' + sample_conf_name + '/index.html'
## mutation
use_mutations = []
if pa_outputs_mutation["case1"]["output_filt"] != "":
use_mutations.append(pa_outputs_mutation["case1"]["output_filt"])
if pa_outputs_mutation["case2"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel"):
use_mutations.append(pa_outputs_mutation["case2"]["output_filt"])
if pa_outputs_mutation["case3"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpair"):
use_mutations.append(pa_outputs_mutation["case3"]["output_filt"])
if pa_outputs_mutation["case4"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel") and genomon_conf.getboolean("paplot", "include_unpair"):
use_mutations.append(pa_outputs_mutation["case4"]["output_filt"])
paplot_inputs_mutation = []
if os.path.exists(paplot_output) == False or pa_outputs_mutation["run_pa"] == True:
paplot_inputs_mutation.extend(use_mutations)
## pmsignature
# ind
ind_outputs = []
ind_exists = True
for i in range(genomon_conf.getint("pmsignature_ind", "signum_min"), genomon_conf.getint("pmsignature_ind", "signum_max") + 1):
fname = run_conf.project_root + '/pmsignature/' + sample_conf_name + '/pmsignature.ind.result.%d.json' % i
ind_outputs.append(fname)
if not os.path.exists(fname): ind_exists = False
run_ind = False
paplot_inputs_ind = []
if len(sample_conf.mutation_call) > 0 and genomon_conf.getboolean("pmsignature_ind", "enable") and len(use_mutations) > 0:
if ind_exists == False: run_ind = True
elif pa_outputs_mutation["run_pa"] == True: run_ind = True
elif not os.path.exists(run_conf.project_root + '/pmsignature/' + sample_conf_name + '/mutation.cut.txt'): run_ind = True
if os.path.exists(paplot_output) == False or run_ind == True:
paplot_inputs_ind.extend(ind_outputs)
# full
full_outputs = []
full_exists = True
for i in range(genomon_conf.getint("pmsignature_full", "signum_min"), genomon_conf.getint("pmsignature_full", "signum_max") + 1):
fname = run_conf.project_root + '/pmsignature/' + sample_conf_name + '/pmsignature.full.result.%d.json' % i
full_outputs.append(fname)
if not os.path.exists(fname): full_exists = False
run_full = False
paplot_inputs_full = []
if len(sample_conf.mutation_call) > 0 and genomon_conf.getboolean("pmsignature_full", "enable") and len(use_mutations) > 0:
if full_exists == False: run_full = True
elif pa_outputs_mutation["run_pa"] == True: run_full = True
elif not os.path.exists(run_conf.project_root + '/pmsignature/' + sample_conf_name + '/mutation.cut.txt'): run_full = True
if os.path.exists(paplot_output) == False or run_full == True:
paplot_inputs_full.extend(full_outputs)
pmsignature_inputs = []
if run_ind == True or run_full == True:
pmsignature_inputs.extend(use_mutations)
## sv
paplot_inputs_sv = []
if os.path.exists(paplot_output) == False or pa_outputs_sv["run_pa"] == True:
if pa_outputs_sv["case1"]["output_filt"] != "":
paplot_inputs_sv.append(pa_outputs_sv["case1"]["output_filt"])
if pa_outputs_sv["case2"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel"):
paplot_inputs_sv.append(pa_outputs_sv["case2"]["output_filt"])
if pa_outputs_sv["case3"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpair"):
paplot_inputs_sv.append(pa_outputs_sv["case3"]["output_filt"])
if pa_outputs_sv["case4"]["output_filt"] != "" and genomon_conf.getboolean("paplot", "include_unpanel") and genomon_conf.getboolean("paplot", "include_unpair"):
paplot_inputs_sv.append(pa_outputs_sv["case4"]["output_filt"])
## qc
paplot_inputs_qc = []
if os.path.exists(paplot_output) == False or pa_outputs_qc["run_pa"] == True:
paplot_inputs_qc.extend(pa_outputs_qc["outputs"])
paplot_inputs = []
paplot_inputs.extend(paplot_inputs_qc)
paplot_inputs.extend(paplot_inputs_sv)
paplot_inputs.extend(paplot_inputs_mutation)
paplot_inputs.extend(paplot_inputs_ind)
paplot_inputs.extend(paplot_inputs_full)
if _debug:
from pprint import pprint
print ("post-analysis-mutation"); pprint (pa_outputs_mutation); print ("post-analysis-sv"); pprint (pa_outputs_sv); print ("post-analysis-qc"); pprint (pa_outputs_qc)
print ("paplot"); pprint (paplot_inputs)
print ("pmsignature"); pprint (pmsignature_inputs)
# prepare output directories
if not os.path.isdir(run_conf.project_root): os.mkdir(run_conf.project_root)
if not os.path.isdir(run_conf.project_root + '/script'): os.mkdir(run_conf.project_root + '/script')
if not os.path.isdir(run_conf.project_root + '/script/sv_merge'): os.mkdir(run_conf.project_root + '/script/sv_merge')
if not os.path.isdir(run_conf.project_root + '/log'): os.mkdir(run_conf.project_root + '/log')
if not os.path.isdir(run_conf.project_root + '/log/sv_merge'): os.mkdir(run_conf.project_root + '/log/sv_merge')
if not os.path.isdir(run_conf.project_root + '/fastq'): os.mkdir(run_conf.project_root + '/fastq')
if not os.path.isdir(run_conf.project_root + '/bam'): os.mkdir(run_conf.project_root + '/bam')
if not os.path.isdir(run_conf.project_root + '/mutation'): os.mkdir(run_conf.project_root + '/mutation')
if not os.path.isdir(run_conf.project_root + '/mutation/control_panel'): os.mkdir(run_conf.project_root + '/mutation/control_panel')
if not os.path.isdir(run_conf.project_root + '/mutation/hotspot'): os.mkdir(run_conf.project_root + '/mutation/hotspot')
if not os.path.isdir(run_conf.project_root + '/sv'): os.mkdir(run_conf.project_root + '/sv')
if not os.path.isdir(run_conf.project_root + '/sv/non_matched_control_panel'): os.mkdir(run_conf.project_root + '/sv/non_matched_control_panel')
if not os.path.isdir(run_conf.project_root + '/sv/control_panel'): os.mkdir(run_conf.project_root + '/sv/control_panel')
if not os.path.isdir(run_conf.project_root + '/qc'): os.mkdir(run_conf.project_root + '/qc')
for sample in sample_conf.qc:
if not os.path.isdir(run_conf.project_root + '/qc/' + sample): os.mkdir(run_conf.project_root + '/qc/' + sample)
if (genomon_conf.getboolean("post_analysis", "enable") == True):
if not os.path.exists(run_conf.project_root + '/post_analysis'): os.mkdir(run_conf.project_root + '/post_analysis')
if not os.path.exists(run_conf.project_root + '/post_analysis/' + sample_conf_name): os.mkdir(run_conf.project_root + '/post_analysis/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/post_analysis'): os.mkdir(run_conf.project_root + '/script/post_analysis')
if not os.path.isdir(run_conf.project_root + '/log/post_analysis'): os.mkdir(run_conf.project_root + '/log/post_analysis')
if (genomon_conf.getboolean("paplot", "enable") == True):
if not os.path.isdir(run_conf.project_root + '/paplot/'): os.mkdir(run_conf.project_root + '/paplot/')
if not os.path.isdir(run_conf.project_root + '/paplot/' + sample_conf_name): os.mkdir(run_conf.project_root + '/paplot/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/paplot'): os.mkdir(run_conf.project_root + '/script/paplot')
if not os.path.isdir(run_conf.project_root + '/log/paplot'): os.mkdir(run_conf.project_root + '/log/paplot')
if (genomon_conf.getboolean("pmsignature_ind", "enable") == True) or (genomon_conf.getboolean("pmsignature_full", "enable") == True):
if not os.path.isdir(run_conf.project_root + '/pmsignature/'): os.mkdir(run_conf.project_root + '/pmsignature/')
if not os.path.isdir(run_conf.project_root + '/pmsignature/' + sample_conf_name): os.mkdir(run_conf.project_root + '/pmsignature/' + sample_conf_name)
if not os.path.isdir(run_conf.project_root + '/script/pmsignature'): os.mkdir(run_conf.project_root + '/script/pmsignature')
if not os.path.isdir(run_conf.project_root + '/log/pmsignature'): os.mkdir(run_conf.project_root + '/log/pmsignature')
if not os.path.isdir(run_conf.project_root + '/config'): os.mkdir(run_conf.project_root + '/config')
for outputfiles in (bam2fastq_output_list, linked_fastq_list):
for outputfile in outputfiles:
sample = os.path.basename(os.path.dirname(outputfile[0][0]))
fastq_dir = run_conf.project_root + '/fastq/' + sample
bam_dir = run_conf.project_root + '/bam/' + sample
if not os.path.isdir(fastq_dir): os.mkdir(fastq_dir)
if not os.path.isdir(bam_dir): os.mkdir(bam_dir)
for target_sample_dict in (sample_conf.bam_import, sample_conf.fastq, sample_conf.bam_tofastq):
for sample in target_sample_dict:
script_dir = run_conf.project_root + '/script/' + sample
log_dir = run_conf.project_root + '/log/' + sample
if not os.path.isdir(script_dir): os.mkdir(script_dir)
if not os.path.isdir(log_dir): os.mkdir(log_dir)
shutil.copyfile(run_conf.genomon_conf_file, run_conf.project_root + '/config/' + genomon_conf_name +'_'+ run_conf.analysis_timestamp + genomon_conf_ext)
shutil.copyfile(run_conf.sample_conf_file, run_conf.project_root + '/config/' + sample_conf_name +'_'+ run_conf.analysis_timestamp + sample_conf_ext)
# prepare output directory for each sample and make mutation control panel file
for complist in sample_conf.mutation_call:
# make dir
mutation_dir = run_conf.project_root + '/mutation/' + complist[0]
if not os.path.isdir(mutation_dir): os.mkdir(mutation_dir)
# make the control panel text
control_panel_name = complist[2]
if control_panel_name != None:
control_panel_file = run_conf.project_root + '/mutation/control_panel/' + control_panel_name + ".control_panel.txt"
with open(control_panel_file, "w") as out_handle:
for panel_sample in sample_conf.control_panel[control_panel_name]:
out_handle.write(run_conf.project_root + '/bam/' + panel_sample + '/' + panel_sample + '.markdup.bam' + "\n")
# make SV configuration file
for complist in sample_conf.sv_detection:
# make the control yaml file
control_panel_name = complist[2]
if control_panel_name != None:
control_conf = run_conf.project_root + '/sv/control_panel/' + control_panel_name + ".control_info.txt"
with open(control_conf, "w") as out_handle:
for sample in sample_conf.control_panel[control_panel_name]:
out_handle.write(sample+ "\t"+ run_conf.project_root+ "/sv/"+ sample +"/"+ sample+ "\n")
# link the import bam to project directory
@originate(sample_conf.bam_import.keys())
def link_import_bam(sample):
bam = sample_conf.bam_import[sample]
link_dir = run_conf.project_root + '/bam/' + sample
bam_prefix, ext = os.path.splitext(bam)
if not os.path.isdir(link_dir): os.mkdir(link_dir)
if (not os.path.exists(link_dir +'/'+ sample +'.markdup.bam')) and (not os.path.exists(link_dir +'/'+ sample +'.markdup.bam.bai')):
os.symlink(bam, link_dir +'/'+ sample +'.markdup.bam')
if (os.path.exists(bam +'.bai')):
os.symlink(bam +'.bai', link_dir +'/'+ sample +'.markdup.bam.bai')
elif (os.path.exists(bam_prefix +'.bai')):
os.symlink(bam_prefix +'.bai', link_dir +'/'+ sample +'.markdup.bam.bai')
# convert bam to fastq
@originate(bam2fastq_output_list)
def bam2fastq(outputfiles):
sample = os.path.basename(os.path.dirname(outputfiles[0][0]))
output_dir = run_conf.project_root + '/fastq/' + sample
arguments = {"biobambam": genomon_conf.get("SOFTWARE", "biobambam"),
"param": genomon_conf.get("bam2fastq", "params"),
"input_bam": sample_conf.bam_tofastq[sample],
"f1_name": outputfiles[0][0],
"f2_name": outputfiles[1][0],
"o1_name": output_dir + '/unmatched_first_output.txt',
"o2_name": output_dir + '/unmatched_second_output.txt',
"t": output_dir + '/temp.txt',
"s": output_dir + '/single_end_output.txt'}
bamtofastq.task_exec(arguments, run_conf.project_root + '/log/' + sample, run_conf.project_root + '/script/'+ sample)
# link the input fastq to project directory
@originate(linked_fastq_list)
def link_input_fastq(output_file):
sample = os.path.basename(os.path.dirname(output_file[0][0]))
fastq_dir = run_conf.project_root + '/fastq/' + sample
fastq_prefix, ext = os.path.splitext(sample_conf.fastq[sample][0][0])
# Todo
# 1. should compare the timestamps between input and linked file
# 2. check md5sum ?
for (count, fastq_files) in enumerate(sample_conf.fastq[sample][0]):
fastq_prefix, ext = os.path.splitext(fastq_files)
if not os.path.exists(fastq_dir + '/'+str(count+1)+'_1'+ ext): os.symlink(sample_conf.fastq[sample][0][count], fastq_dir + '/'+str(count+1)+'_1'+ ext)
if not os.path.exists(fastq_dir + '/'+str(count+1)+'_2'+ ext): os.symlink(sample_conf.fastq[sample][1][count], fastq_dir + '/'+str(count+1)+'_2'+ ext)
# split fastq
@subdivide([bam2fastq, link_input_fastq], formatter(), "{path[0]}/*_*.fastq_split", "{path[0]}")
def split_files(input_files, output_files, target_dir):
sample_name = os.path.basename(target_dir)
for oo in output_files:
os.unlink(oo)
split_lines = genomon_conf.get("split_fastq", "split_fastq_line_number")
input_prefix, ext = os.path.splitext(input_files[0][0])
arguments = {"lines": split_lines,
"fastq_filter": genomon_conf.get("split_fastq", "fastq_filter"),
"target_dir": target_dir,
"ext": ext}
fastq_splitter.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/'+ sample_name, 2)
file_list = glob.glob(target_dir + '/1_*.fastq_split')
file_list.sort()
last_file_lines = sum(1 for line in open(file_list[-1]))
all_line_num = ((len(file_list)-1)*int(split_lines)) + last_file_lines
with open(target_dir + "/fastq_line_num.txt", "w") as out_handle:
out_handle.write(str(all_line_num)+"\n")
for input_fastq in input_files[0]:
os.unlink(input_fastq)
for input_fastq in input_files[1]:
os.unlink(input_fastq)
# bwa: align the split fastq chunks and emit per-chunk sorted bams
@subdivide(split_files, formatter(".+/(.+)/1_0000.fastq_split"), add_inputs("{subpath[0][2]}/fastq/{subdir[0][0]}/2_0000.fastq_split"), "{subpath[0][2]}/bam/{subdir[0][0]}/{subdir[0][0]}_*.sorted.bam", "{subpath[0][2]}/fastq/{subdir[0][0]}", "{subpath[0][2]}/bam/{subdir[0][0]}")
def map_dna_sequence(input_files, output_files, input_dir, output_dir):
sample_name = os.path.basename(output_dir)
all_line_num = 0
with open(input_dir + "/fastq_line_num.txt") as in_handle:
tmp_num = in_handle.read()
all_line_num = int(tmp_num)
split_lines = genomon_conf.get("split_fastq", "split_fastq_line_number")
    ans_quotient = all_line_num // int(split_lines)  # floor division (Python 2 "/" on ints behaved the same)
ans_remainder = all_line_num % int(split_lines)
max_task_id = ans_quotient if ans_remainder == 0 else ans_quotient + 1
arguments = {"input_dir": input_dir,
"output_dir": output_dir,
"sample_name": sample_name,
"bwa": genomon_conf.get("SOFTWARE", "bwa"),
"bwa_params": genomon_conf.get("bwa_mem", "bwa_params"),
"ref_fa":genomon_conf.get("REFERENCE", "ref_fasta"),
"biobambam": genomon_conf.get("SOFTWARE", "biobambam")}
bwa_align.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name, max_task_id)
for task_id in range(max_task_id):
num = str(task_id).zfill(4)
os.unlink(input_dir +'/1_'+str(num)+'.fastq_split')
os.unlink(input_dir +'/2_'+str(num)+'.fastq_split')
os.unlink(output_dir+'/'+sample_name+'_'+str(num)+'.bwa.sam')
# merge sorted bams into one and mark duplicate reads with biobambam
@collate(map_dna_sequence, formatter(), "{subpath[0][2]}/bam/{subdir[0][0]}/{subdir[0][0]}.markdup.bam", "{subpath[0][2]}/bam/{subdir[0][0]}")
def markdup(input_files, output_file, output_dir):
sample_name = os.path.basename(output_dir)
output_prefix, ext = os.path.splitext(output_file)
input_bam_files = ""
for input_file in input_files:
input_bam_files = input_bam_files + " I=" + input_file
arguments = {"biobambam": genomon_conf.get("SOFTWARE", "biobambam"),
"out_prefix": output_prefix,
"input_bam_files": input_bam_files,
"out_bam": output_file}
markduplicates.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/'+ sample_name)
for input_file in input_files:
os.unlink(input_file)
os.unlink(input_file + ".bai")
# identify mutations
@follows( markdup )
@follows( link_import_bam )
@subdivide(markdup_bam_list, formatter(), "{subpath[0][2]}/mutation/{subdir[0][0]}/{subdir[0][0]}.genomon_mutation.result.filt.txt", "{subpath[0][2]}/mutation/{subdir[0][0]}")
def identify_mutations(input_file, output_file, output_dir):
sample_name = os.path.basename(output_dir)
active_inhouse_normal_flag = False
if genomon_conf.has_option("annotation", "active_inhouse_normal_flag"):
active_inhouse_normal_flag = genomon_conf.get("annotation", "active_inhouse_normal_flag")
inhouse_normal_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "inhouse_normal_tabix_db"):
inhouse_normal_tabix_db = genomon_conf.get("REFERENCE", "inhouse_normal_tabix_db")
active_inhouse_tumor_flag = False
if genomon_conf.has_option("annotation", "active_inhouse_tumor_flag"):
active_inhouse_tumor_flag = genomon_conf.get("annotation", "active_inhouse_tumor_flag")
inhouse_tumor_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "inhouse_tumor_tabix_db"):
inhouse_tumor_tabix_db = genomon_conf.get("REFERENCE", "inhouse_tumor_tabix_db")
active_HGMD_flag = False
if genomon_conf.has_option("annotation", "active_HGMD_flag"):
active_HGMD_flag = genomon_conf.get("annotation", "active_HGMD_flag")
HGMD_tabix_db = ""
if genomon_conf.has_option("REFERENCE", "HGMD_tabix_db"):
HGMD_tabix_db = genomon_conf.get("REFERENCE", "HGMD_tabix_db")
arguments = {
# fisher mutation
"fisher": genomon_conf.get("SOFTWARE", "fisher"),
"fisher_pair_params": genomon_conf.get("fisher_mutation_call", "pair_params"),
"fisher_single_params": genomon_conf.get("fisher_mutation_call", "single_params"),
# realignment filter
"mutfilter": genomon_conf.get("SOFTWARE", "mutfilter"),
"realignment_params": genomon_conf.get("realignment_filter","params"),
# indel filter
"indel_params": genomon_conf.get("indel_filter", "params"),
# breakpoint filter
"breakpoint_params": genomon_conf.get("breakpoint_filter","params"),
# simplerepeat filter
"simple_repeat_db":genomon_conf.get("REFERENCE", "simple_repeat_tabix_db"),
# EB filter
"EBFilter": genomon_conf.get("SOFTWARE", "ebfilter"),
"eb_map_quality": genomon_conf.get("eb_filter","map_quality"),
"eb_base_quality": genomon_conf.get("eb_filter","base_quality"),
"filter_flags": genomon_conf.get("eb_filter","filter_flags"),
"control_bam_list": input_file[2],
# hotspot mutation caller
"hotspot": genomon_conf.get("SOFTWARE","hotspot"),
"hotspot_database":genomon_conf.get("REFERENCE","hotspot_db"),
"active_hotspot_flag":genomon_conf.get("hotspot","active_hotspot_flag"),
"hotspot_params": genomon_conf.get("hotspot","params"),
"mutil": genomon_conf.get("SOFTWARE", "mutil"),
# original_annotations
"mutanno": genomon_conf.get("SOFTWARE", "mutanno"),
"active_inhouse_normal_flag": active_inhouse_normal_flag,
"inhouse_normal_database":inhouse_normal_tabix_db,
"active_inhouse_tumor_flag": active_inhouse_tumor_flag,
"inhouse_tumor_database":inhouse_tumor_tabix_db,
"active_HGVD_2013_flag": genomon_conf.get("annotation", "active_HGVD_2013_flag"),
"HGVD_2013_database":genomon_conf.get("REFERENCE", "HGVD_2013_tabix_db"),
"active_HGVD_2016_flag": genomon_conf.get("annotation", "active_HGVD_2016_flag"),
"HGVD_2016_database":genomon_conf.get("REFERENCE", "HGVD_2016_tabix_db"),
"active_ExAC_flag": genomon_conf.get("annotation", "active_ExAC_flag"),
"ExAC_database":genomon_conf.get("REFERENCE", "ExAC_tabix_db"),
"active_HGMD_flag": active_HGMD_flag,
"HGMD_database": HGMD_tabix_db,
# annovar
"active_annovar_flag": genomon_conf.get("annotation", "active_annovar_flag"),
"annovar": genomon_conf.get("SOFTWARE", "annovar"),
"annovar_database": genomon_conf.get("annotation", "annovar_database"),
"table_annovar_params": genomon_conf.get("annotation", "table_annovar_params"),
"annovar_buildver": genomon_conf.get("annotation", "annovar_buildver"),
# commmon
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"ref_fa":genomon_conf.get("REFERENCE", "ref_fasta"),
"interval_list": genomon_conf.get("REFERENCE", "interval_list"),
"disease_bam": input_file[0],
"control_bam": input_file[1],
"out_prefix": output_dir + '/' + sample_name,
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"blat": genomon_conf.get("SOFTWARE", "blat")}
interval_list = genomon_conf.get("REFERENCE", "interval_list")
max_task_id = sum(1 for line in open(interval_list))
mutation_call.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name, max_task_id)
arguments = {
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"control_bam": input_file[1],
"control_bam_list": input_file[2],
"active_annovar_flag": genomon_conf.get("annotation", "active_annovar_flag"),
"annovar_buildver": genomon_conf.get("annotation", "annovar_buildver"),
"active_HGVD_2013_flag": genomon_conf.get("annotation", "active_HGVD_2013_flag"),
"active_HGVD_2016_flag": genomon_conf.get("annotation", "active_HGVD_2016_flag"),
"active_ExAC_flag": genomon_conf.get("annotation", "active_ExAC_flag"),
"active_HGMD_flag": active_HGMD_flag,
"active_inhouse_normal_flag": active_inhouse_normal_flag,
"active_inhouse_tumor_flag": active_inhouse_tumor_flag,
"filecount": max_task_id,
"mutil": genomon_conf.get("SOFTWARE", "mutil"),
"pair_params": genomon_conf.get("mutation_util","pair_params"),
"single_params": genomon_conf.get("mutation_util","single_params"),
"active_hotspot_flag":genomon_conf.get("hotspot","active_hotspot_flag"),
"hotspot_database":genomon_conf.get("REFERENCE","hotspot_db"),
"meta_info_em": get_meta_info(["fisher", "mutfilter", "ebfilter", "mutil", "mutanno"]),
"meta_info_m": get_meta_info(["fisher", "mutfilter", "mutil", "mutanno"]),
"meta_info_ema": get_meta_info(["fisher", "mutfilter", "ebfilter", "mutil", "mutanno", "hotspot"]),
"meta_info_ma": get_meta_info(["fisher", "mutfilter", "mutil", "mutanno", "hotspot"]),
"out_prefix": output_dir + '/' + sample_name}
mutation_merge.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
    annovar_buildver = genomon_conf.get("annotation", "annovar_buildver")
    for task_id in range(1,(max_task_id + 1)):
        input_file = output_dir+'/'+sample_name+'_mutations_candidate.'+str(task_id)+'.'+annovar_buildver+'_multianno.txt'
os.unlink(input_file)
    # Remove the per-interval intermediate files produced by each filter/annotation step.
    intermediate_suffixes = ['fisher_mutations', 'hotspot_mutations',
                             'fisher_hotspot_mutations', 'realignment_mutations',
                             'indel_mutations', 'breakpoint_mutations',
                             'simplerepeat_mutations', 'ebfilter_mutations',
                             'inhouse_normal', 'inhouse_tumor',
                             'HGVD_2013', 'HGVD_2016', 'ExAC', 'HGMD']
    for task_id in range(1,(max_task_id + 1)):
        for suffix in intermediate_suffixes:
            tmp_file = output_dir+'/'+sample_name+'.'+suffix+'.'+str(task_id)+'.txt'
            if os.path.exists(tmp_file):
                os.unlink(tmp_file)
# parse SV
@follows( link_import_bam )
@follows( markdup )
@transform(parse_sv_bam_list, formatter(), "{subpath[0][2]}/sv/{subdir[0][0]}/{subdir[0][0]}.junction.clustered.bedpe.gz")
def parse_sv(input_file, output_file):
dir_name = os.path.dirname(output_file)
if not os.path.isdir(dir_name): os.mkdir(dir_name)
sample_name = os.path.basename(dir_name)
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"input_bam": input_file,
"output_prefix": output_file.replace(".junction.clustered.bedpe.gz", ""),
"param": genomon_conf.get("sv_parse", "params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib")}
sv_parse.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name)
# merge SV
@follows( parse_sv )
@transform(merge_bedpe_list, formatter(".+/(?P<NAME>.+).control_info.txt"), "{subpath[0][2]}/sv/non_matched_control_panel/{NAME[0]}.merged.junction.control.bedpe.gz")
def merge_sv(input_files, output_file):
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"control_info": input_files[0],
"merge_output_file": output_file,
"param": genomon_conf.get("sv_merge", "params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib")}
sv_merge.task_exec(arguments, run_conf.project_root + '/log/sv_merge', run_conf.project_root + '/script/sv_merge')
# filt SV
@follows( merge_sv )
@transform(filt_bedpe_list, formatter(), "{subpath[0][2]}/sv/{subdir[0][0]}/{subdir[0][0]}.genomonSV.result.filt.txt")
def filt_sv(input_files, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
#sample_yaml = run_conf.project_root + "/sv/config/" + sample_name + ".yaml"
filt_param = ""
for complist in sample_conf.sv_detection:
if sample_name == complist[0]:
if complist[1] != None:
filt_param = filt_param + " --matched_control_bam " + run_conf.project_root + "/bam/" + complist[1] + '/' + complist[1] + ".markdup.bam"
if complist[2] != None:
filt_param = filt_param + " --non_matched_control_junction " + run_conf.project_root +"/sv/non_matched_control_panel/"+ complist[2] +".merged.junction.control.bedpe.gz"
if complist[1] != None:
filt_param = filt_param + " --matched_control_label " + complist[1]
break
filt_param = filt_param.lstrip(' ') + ' ' + genomon_conf.get("sv_filt", "params")
arguments = {"genomon_sv": genomon_conf.get("SOFTWARE", "genomon_sv"),
"input_bam": run_conf.project_root + "/bam/" + sample_name + '/' + sample_name + ".markdup.bam",
"output_prefix": run_conf.project_root + "/sv/" + sample_name + '/' + sample_name,
"reference_genome": genomon_conf.get("REFERENCE", "ref_fasta"),
"annotation_dir": genomon_conf.get("sv_filt", "annotation_dir"),
"param": filt_param,
"meta_info": get_meta_info(["genomon_sv", "sv_utils"]),
"sv_utils": genomon_conf.get("SOFTWARE", "sv_utils"),
"sv_utils_annotation_dir": genomon_conf.get("sv_filt", "sv_utils_annotation_dir"),
"sv_utils_param": genomon_conf.get("sv_filt", "sv_utils_params"),
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"htslib": genomon_conf.get("SOFTWARE", "htslib"),
"blat": genomon_conf.get("SOFTWARE", "blat")}
sv_filt.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
# qc
@follows( link_import_bam )
@follows( markdup )
@follows( filt_sv )
@follows( identify_mutations )
@transform(qc_bamstats_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.bamstats")
def bam_stats(input_file, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"bamstats": genomon_conf.get("SOFTWARE", "bamstats"),
"perl5lib": genomon_conf.get("ENV", "PERL5LIB"),
"input_file": input_file,
"output_file": output_file}
r_qc_bamstats.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
@follows( link_import_bam )
@follows( markdup )
@follows( filt_sv )
@follows( identify_mutations )
@transform(qc_coverage_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.coverage")
def coverage(input_file, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
data_type = "exome"
if genomon_conf.get("qc_coverage", "wgs_flag") == "True":
data_type = "wgs"
arguments = {"data_type": data_type,
"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"coverage_text": genomon_conf.get("qc_coverage", "coverage"),
"i_bed_lines": genomon_conf.get("qc_coverage", "wgs_i_bed_lines"),
"i_bed_width": genomon_conf.get("qc_coverage", "wgs_i_bed_width"),
"incl_bed_width":genomon_conf.get("qc_coverage", "wgs_incl_bed_width"),
"genome_size_file": genomon_conf.get("REFERENCE", "genome_size"),
"gaptxt": genomon_conf.get("REFERENCE", "gaptxt"),
"bait_file": genomon_conf.get("REFERENCE", "bait_file"),
"samtools_params": genomon_conf.get("qc_coverage", "samtools_params"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"input_file": input_file,
"output_file": output_file}
r_qc_coverage.task_exec(arguments, run_conf.project_root + '/log/' + sample_name , run_conf.project_root + '/script/' + sample_name)
@follows( bam_stats )
@follows( coverage )
@collate(qc_merge_list, formatter(), "{subpath[0][2]}/qc/{subdir[0][0]}/{subdir[0][0]}.genomonQC.result.txt")
def merge_qc(input_files, output_file):
dir_name = os.path.dirname(output_file)
sample_name = os.path.basename(dir_name)
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_qc": genomon_conf.get("SOFTWARE", "genomon_qc"),
"bamstats_file": input_files[0][0],
"coverage_file": input_files[0][1],
"output_file": output_file,
"meta": get_meta_info(["genomon_pipeline"]),
"fastq_line_num_file": run_conf.project_root +'/fastq/'+ sample_name +'/fastq_line_num.txt'}
r_qc_merge.task_exec(arguments, run_conf.project_root + '/log/' + sample_name, run_conf.project_root + '/script/' + sample_name)
#####################
# post analysis stage
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_mutation) > 0)
@follows(filt_sv)
@follows(identify_mutations)
@collate(pa_inputs_mutation, formatter(), pa_outputs_mutation["outputs"])
def post_analysis_mutation(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "mutation",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(pa_outputs_mutation["case1"]["samples"]),
"input_file_case2": ",".join(pa_outputs_mutation["case2"]["samples"]),
"input_file_case3": ",".join(pa_outputs_mutation["case3"]["samples"]),
"input_file_case4": ",".join(pa_outputs_mutation["case4"]["samples"]),
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_sv) > 0)
@follows(filt_sv)
@follows(identify_mutations)
@collate(pa_inputs_sv, formatter(), pa_outputs_sv["outputs"])
def post_analysis_sv(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "sv",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(pa_outputs_sv["case1"]["samples"]),
"input_file_case2": ",".join(pa_outputs_sv["case2"]["samples"]),
"input_file_case3": ",".join(pa_outputs_sv["case3"]["samples"]),
"input_file_case4": ",".join(pa_outputs_sv["case4"]["samples"]),
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(len(pa_inputs_qc) > 0)
@follows(merge_qc)
@collate(pa_inputs_qc, formatter(), pa_outputs_qc["outputs"])
def post_analysis_qc(input_files, output_file):
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"genomon_pa": genomon_conf.get("SOFTWARE", "genomon_pa"),
"mode": "qc",
"genomon_root": run_conf.project_root,
"output_dir": run_conf.project_root + "/post_analysis/" + sample_conf_name,
"sample_sheet": os.path.abspath(run_conf.sample_conf_file),
"config_file": genomon_conf.get("post_analysis", "config_file"),
"samtools": genomon_conf.get("SOFTWARE", "samtools"),
"bedtools": genomon_conf.get("SOFTWARE", "bedtools"),
"input_file_case1": ",".join(sample_conf.qc),
"input_file_case2": "",
"input_file_case3": "",
"input_file_case4": "",
}
r_post_analysis.task_exec(arguments, run_conf.project_root + '/log/post_analysis', run_conf.project_root + '/script/post_analysis')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_ind", "enable") or genomon_conf.getboolean("pmsignature_full", "enable"))
@active_if(len(pmsignature_inputs) > 0)
@follows(post_analysis_mutation)
@collate(pmsignature_inputs, formatter(), run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt")
def pre_pmsignature(input_files, output_file):
arguments = {"input_files" : " ".join(input_files),
"output_file" : run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt"
}
r_pre_pmsignature.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature')
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_ind", "enable"))
@active_if(run_ind)
@follows(pre_pmsignature)
@transform(run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt", formatter(), ind_outputs[0])
def pmsignature_ind(input_file, output_file):
command = r_pmsignature_ind.ind_template.format(
inputfile = input_file,
outputdir = run_conf.project_root + '/pmsignature/' + sample_conf_name,
trdirflag = genomon_conf.get("pmsignature_ind", "trdirflag").upper(),
trialnum = genomon_conf.getint("pmsignature_ind", "trialnum"),
bs_genome = genomon_conf.get("pmsignature_ind", "bs_genome"),
bgflag = genomon_conf.get("pmsignature_ind", "bgflag"),
txdb_transcript = genomon_conf.get("pmsignature_ind", "txdb_transcript"),
script_path = genomon_conf.get("SOFTWARE", "r_scripts"))
sig_nums = range(genomon_conf.getint("pmsignature_ind", "signum_min"), genomon_conf.getint("pmsignature_ind", "signum_max") + 1)
sig_num_text = ""
for i in sig_nums: sig_num_text += "%d " % i
arguments = {"r_path": genomon_conf.get("ENV", "R_PATH"),
"r_ld_library_path": genomon_conf.get("ENV", "R_LD_LIBRARY_PATH"),
"r_libs": genomon_conf.get("ENV", "R_LIBS"),
"command": command,
"sig_list": sig_num_text
}
max_task_id = len(sig_nums)
r_pmsignature_ind.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature', max_task_id)
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("pmsignature_full", "enable"))
@active_if(run_full)
@follows(pre_pmsignature)
@transform(run_conf.project_root + '/pmsignature/' + sample_conf_name + "/mutation.cut.txt", formatter(), full_outputs[0])
def pmsignature_full(input_file, output_file):
command = r_pmsignature_full.full_template.format(
inputfile = input_file,
outputdir = run_conf.project_root + '/pmsignature/' + sample_conf_name,
trdirflag = genomon_conf.get("pmsignature_full", "trdirflag").upper(),
trialnum = genomon_conf.getint("pmsignature_full", "trialnum"),
bgflag = genomon_conf.get("pmsignature_full", "bgflag"),
bs_genome = genomon_conf.get("pmsignature_full", "bs_genome"),
txdb_transcript = genomon_conf.get("pmsignature_full", "txdb_transcript"),
script_path = genomon_conf.get("SOFTWARE", "r_scripts"))
sig_nums = range(genomon_conf.getint("pmsignature_full", "signum_min"), genomon_conf.getint("pmsignature_full", "signum_max") + 1)
sig_num_text = ""
for i in sig_nums: sig_num_text += "%d " % i
arguments = {"r_path": genomon_conf.get("ENV", "R_PATH"),
"r_ld_library_path": genomon_conf.get("ENV", "R_LD_LIBRARY_PATH"),
"r_libs": genomon_conf.get("ENV", "R_LIBS"),
"command": command,
"sig_list": sig_num_text
}
max_task_id = len(sig_nums)
r_pmsignature_full.task_exec(arguments, run_conf.project_root + '/log/pmsignature', run_conf.project_root + '/script/pmsignature', max_task_id)
@active_if(genomon_conf.getboolean("post_analysis", "enable"))
@active_if(genomon_conf.getboolean("paplot", "enable"))
@active_if(len(paplot_inputs) > 0)
@follows(post_analysis_sv)
@follows(post_analysis_qc)
@follows(pmsignature_ind)
@follows(pmsignature_full)
@collate(paplot_inputs, formatter(), run_conf.project_root + '/paplot/' + sample_conf_name + '/index.html')
def paplot(input_file, output_file):
if not os.path.exists(paplot_output) and os.path.exists(run_conf.project_root + '/paplot/' + sample_conf_name + '/.meta.json'):
os.unlink(run_conf.project_root + '/paplot/' + sample_conf_name + '/.meta.json')
command = ""
if len(paplot_inputs_qc) > 0:
command += r_paplot.qc_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_qc),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if len(paplot_inputs_sv) > 0:
command += r_paplot.sv_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_sv),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if len(paplot_inputs_mutation) > 0:
command += r_paplot.mutation_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
inputs = ",".join(paplot_inputs_mutation),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"),
annovar = genomon_conf.getboolean("annotation", "active_annovar_flag"))
if genomon_conf.getboolean("pmsignature_ind", "enable"):
for i in range(len(paplot_inputs_ind)):
command += r_paplot.ind_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
input = paplot_inputs_ind[i],
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
if genomon_conf.getboolean("pmsignature_full", "enable"):
for i in range(len(paplot_inputs_full)):
command += r_paplot.full_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
input = paplot_inputs_full[i],
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
title = genomon_conf.get("paplot", "title"),
config_file = genomon_conf.get("paplot", "config_file"))
remark = genomon_conf.get("paplot", "remarks")
remark += "<ul>"
for item in genomon_conf.get("paplot", "software").split(","):
key = item.split(":")[0].strip(" ").rstrip(" ")
name = item.split(":")[1].strip(" ").rstrip(" ")
try:
version = get_version(key).split("-")
except Exception:
print ("[WARNING] paplot: %s is not defined." % (key))
continue
remark += "<li>" + name + " " + version[-1] + "</li>"
remark += "</ul>"
command += r_paplot.index_template.format(
paplot = genomon_conf.get("SOFTWARE", "paplot"),
output_dir = run_conf.project_root + "/paplot/" + sample_conf_name,
remarks = remark,
config_file = genomon_conf.get("paplot", "config_file"))
arguments = {"pythonhome": genomon_conf.get("ENV", "PYTHONHOME"),
"ld_library_path": genomon_conf.get("ENV", "LD_LIBRARY_PATH"),
"pythonpath": genomon_conf.get("ENV", "PYTHONPATH"),
"paplot": genomon_conf.get("SOFTWARE", "paplot"),
"command": command
}
r_paplot.task_exec(arguments, run_conf.project_root + '/log/paplot', run_conf.project_root + '/script/paplot')
|
gpl-2.0
| 2,453,567,865,520,221,000
| 54.965184
| 288
| 0.628586
| false
| 3.115202
| true
| false
| false
|
twicki/dawn
|
docs/_extension/cmake.py
|
1
|
4098
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
## _
## | |
## __| | __ ___ ___ ___
## / _` |/ _` \ \ /\ / / '_ |
## | (_| | (_| |\ V V /| | | |
## \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
## This file is distributed under the MIT License (MIT).
## See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
import os
import re
from docutils.parsers.rst import Directive, directives
from docutils.transforms import Transform
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils import io, nodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
class CMakeModule(Directive):
""" Declare the cmake-module directive
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'encoding': directives.encoding}
def __init__(self, *args, **keys):
self.re_start = re.compile(r'^#\[(?P<eq>=*)\[\.rst:$')
Directive.__init__(self, *args, **keys)
def run(self):
settings = self.state.document.settings
if not settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
env = self.state.document.settings.env
rel_path, path = env.relfn2path(self.arguments[0])
path = os.path.normpath(path)
encoding = self.options.get('encoding', settings.input_encoding)
e_handler = settings.input_encoding_error_handler
try:
settings.record_dependencies.add(path)
f = io.FileInput(source_path=path, encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe('Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
raw_lines = f.read().splitlines()
f.close()
rst = None
lines = []
for line in raw_lines:
if rst is not None and rst != '#':
# Bracket mode: check for end bracket
pos = line.find(rst)
if pos >= 0:
if line[0] == '#':
line = ''
else:
line = line[0:pos]
rst = None
else:
# Line mode: check for .rst start (bracket or line)
m = self.re_start.match(line)
if m:
rst = ']%s]' % m.group('eq')
line = ''
elif line == '#.rst:':
rst = '#'
line = ''
elif rst == '#':
if line == '#' or line[:2] == '# ':
line = line[2:]
else:
rst = None
line = ''
elif rst is None:
line = ''
lines.append(line)
if rst is not None and rst != '#':
raise self.warning('"%s" found unclosed bracket "#[%s[.rst:" in %s' %
(self.name, rst[1:-1], path))
self.state_machine.insert_input(lines, path)
return []
def setup(app):
app.add_directive('cmake-module', CMakeModule)
# app.add_transform(CMakeTransform)
# app.add_transform(CMakeXRefTransform)
# app.add_domain(CMakeDomain)
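# Illustrative note (not part of the original extension): the directive above
# extracts reStructuredText that a CMake module embeds either in bracket form,
#
#   #[=[.rst:
#   MyModule
#   --------
#   Hypothetical documentation for MyModule.
#   #]=]
#
# or in line form, where a block starts with a "#.rst:" line and every following
# line begins with "# ". A Sphinx document would then pull the text in with:
#
#   .. cmake-module:: ../../cmake/MyModule.cmake
#
# The module name and path are hypothetical; only the markers and the directive
# name correspond to the parser implemented above.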
|
mit
| 2,626,606,504,850,924,000
| 36.263636
| 100
| 0.464129
| false
| 4.364217
| false
| false
| false
|
masoodking/LinkPrediction
|
tsne_python/tsne.py
|
1
|
5760
|
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
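# Illustrative note (not in the original script): because H above is computed with
# the natural logarithm, the perplexity of a row is exp(H), and x2p below searches
# for the precision beta that makes H equal log(target_perplexity). A rough usage
# sketch with made-up squared distances:
#
#   Di = Math.array([0.5, 1.2, 2.0]) # squared distances to the other points
#   (H, P) = Hbeta(Di, 1.0) # entropy and normalized affinities for beta = 1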
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta))
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if X.dtype != "float64":
print "Error: array X should have type float64.";
return -1;
#if no_dims.__class__ != "<type 'int'>": # doesn't work yet!
# print "Error: number of dimensions should be an integer.";
# return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 200;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
print "Running example on 2,500 MNIST digits..."
X = Math.loadtxt("d500.txt");
#labels = Math.loadtxt("labels.txt");
text_file = open("l500.txt", "r")
labels = text_file.readlines()
Y = tsne(X, 2, 50, 20.0);
#Plot.scatter(Y[:,0], Y[:,1], 20, labels)
Plot.scatter(
Y[:, 0], Y[:, 1], marker = 'o', c = Y[:, 1],
cmap = Plot.get_cmap('Spectral'))
'''
for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
Plot.annotate(label, xy = (x, y), xytext = (-20, 20),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
'''
Plot.show()
|
bsd-3-clause
| 3,899,504,067,676,658,000
| 29.967742
| 117
| 0.602257
| false
| 2.648276
| false
| false
| false
|
convexopt/gpkit
|
gpkit/tests/t_vars.py
|
1
|
7635
|
"""Test VarKey, Variable, VectorVariable, and ArrayVariable classes"""
import unittest
import numpy as np
from gpkit import (Monomial, NomialArray, Variable, VarKey,
VectorVariable, ArrayVariable)
import gpkit
from gpkit.nomials import Variable as PlainVariable
class TestVarKey(unittest.TestCase):
"""TestCase for the VarKey class"""
def test_init(self):
"""Test VarKey initialization"""
# test type
x = VarKey('x')
self.assertEqual(type(x), VarKey)
# test no args
x = VarKey()
self.assertEqual(type(x), VarKey)
y = VarKey(x)
self.assertEqual(x, y)
# test special 'name' keyword overwriting behavior
x = VarKey('x', flavour='vanilla')
self.assertEqual(x.name, 'x')
x = VarKey(name='x')
self.assertEqual(x.name, 'x')
# pylint: disable=redundant-keyword-arg
self.assertRaises(TypeError, lambda: VarKey('x', name='y'))
# pylint: disable=redundant-keyword-arg
self.assertRaises(TypeError, lambda: VarKey(x, name='y'))
def test_eq_neq(self):
"""Test boolean equality operators"""
# no args
vk1 = VarKey()
vk2 = VarKey()
self.assertTrue(vk1 != vk2)
self.assertFalse(vk1 == vk2)
self.assertEqual(vk1, vk1)
V = VarKey('V')
vel = VarKey('V')
self.assertTrue(V == vel)
self.assertFalse(V != vel)
self.assertEqual(vel, vel)
x1 = Variable("x", 3, "m")
x2 = Variable("x", 2, "ft")
x3 = Variable("x", 2, "m")
if gpkit.units:
self.assertNotEqual(x2.key, x3.key)
else: # units don't distinguish variables when they're disabled
self.assertEqual(x2.key, x3.key)
self.assertEqual(x1.key, x3.key)
def test_repr(self):
"""Test __repr__ method"""
for k in ('x', '$x$', 'var_name', 'var name', r"\theta", r'$\pi_{10}$'):
var = VarKey(k)
self.assertEqual(repr(var), k)
# not sure what this means, but I want to know if it changes
for num in (2, 2.0):
v = VarKey(num)
self.assertEqual(v, VarKey(str(num)))
def test_dict_key(self):
"""make sure variables are well-behaved dict keys"""
v = VarKey()
x = VarKey('$x$')
d = {v: 1273, x: 'foo'}
self.assertEqual(d[v], 1273)
self.assertEqual(d[x], 'foo')
d = {VarKey(): None, VarKey(): 12}
self.assertEqual(len(d), 2)
def test_units_attr(self):
"""Make sure VarKey objects have a units attribute"""
x = VarKey('x')
for vk in (VarKey(), x, VarKey(x), VarKey(units='m')):
self.assertTrue(hasattr(vk, 'units'))
class TestVariable(unittest.TestCase):
"""TestCase for the Variable class"""
def test_init(self):
"""Test Variable initialization"""
v = Variable('v')
self.assertTrue(isinstance(v, PlainVariable))
self.assertTrue(isinstance(v, Monomial))
# test that operations on Variable cast to Monomial
self.assertTrue(isinstance(3*v, Monomial))
self.assertFalse(isinstance(3*v, PlainVariable))
def test_value(self):
"""Detailed tests for value kwarg of __init__"""
a = Variable('a')
b = Variable('b', value=4)
c = a**2 + b
self.assertEqual(b.value, 4)
self.assertTrue(isinstance(b.value, float))
p1 = c.value
p2 = a**2 + 4
self.assertEqual(p1, p2)
self.assertEqual(a.value, a)
def test_hash(self):
x1 = Variable("x", "-", "first x")
x2 = Variable("x", "-", "second x")
self.assertEqual(hash(x1), hash(x2))
p1 = Variable("p", "psi", "first pressure")
p2 = Variable("p", "psi", "second pressure")
self.assertEqual(hash(p1), hash(p2))
xu = Variable("x", "m", "x with units")
if gpkit.units:
self.assertNotEqual(hash(x1), hash(xu))
else: # units don't distinguish variables when they're disabled
self.assertEqual(hash(x1), hash(xu))
def test_unit_parsing(self):
x = Variable("x", "s^0.5/m^0.5")
y = Variable("y", "(m/s)^-0.5")
self.assertEqual(x.units, y.units)
def test_to(self):
if gpkit.units:
x = Variable("x", "ft")
self.assertEqual(x.to("inch").c.magnitude, 12)
def test_eq_ne(self):
# test for #1138
W = Variable("W", 5, "lbf", "weight of 1 bag of sugar")
self.assertTrue(W != W.key)
self.assertTrue(W.key != W)
self.assertFalse(W == W.key)
self.assertFalse(W.key == W)
class TestVectorVariable(unittest.TestCase):
"""TestCase for the VectorVariable class.
Note: more relevant tests in t_posy_array."""
def test_init(self):
"""Test VectorVariable initialization"""
# test 1
n = 3
v = VectorVariable(n, 'v', label='dummy variable')
self.assertTrue(isinstance(v, NomialArray))
v_mult = 3*v
for i in range(n):
self.assertTrue(isinstance(v[i], PlainVariable))
self.assertTrue(isinstance(v[i], Monomial))
# test that operations on Variable cast to Monomial
self.assertTrue(isinstance(v_mult[i], Monomial))
self.assertFalse(isinstance(v_mult[i], PlainVariable))
# test 2
x = VectorVariable(3, 'x', label='dummy variable')
x_0 = Monomial('x', idx=(0,), shape=(3,), label='dummy variable')
x_1 = Monomial('x', idx=(1,), shape=(3,), label='dummy variable')
x_2 = Monomial('x', idx=(2,), shape=(3,), label='dummy variable')
x2 = NomialArray([x_0, x_1, x_2])
self.assertEqual(x, x2)
# test inspired by issue 137
N = 20
x_arr = np.arange(0, 5., 5./N) + 1e-6
x = VectorVariable(N, 'x', x_arr, 'm', "Beam Location")
def test_constraint_creation_units(self):
v = VectorVariable(2, "v", "m/s")
c = (v >= 40*gpkit.units("ft/s"))
c2 = (v >= np.array([20, 30])*gpkit.units("ft/s"))
if gpkit.units:
self.assertTrue(c.right.units)
self.assertTrue(NomialArray(c2.right).units)
else:
self.assertEqual(type(c.right), int)
self.assertEqual(type(c2.right), np.ndarray)
class TestArrayVariable(unittest.TestCase):
"""TestCase for the ArrayVariable class"""
def test_is_vector_variable(self):
"""
Make sure ArrayVariable is a shortcut to VectorVariable
(we want to know if this changes).
"""
self.assertTrue(ArrayVariable is VectorVariable)
def test_str(self):
"""Make sure string looks something like a numpy array"""
x = ArrayVariable((2, 4), 'x')
strx = str(x)
self.assertEqual(strx.count("["), 3)
self.assertEqual(strx.count("]"), 3)
class TestVectorize(unittest.TestCase):
"""TestCase for gpkit.vectorize"""
def test_shapes(self):
with gpkit.Vectorize(3):
with gpkit.Vectorize(5):
y = gpkit.Variable("y")
x = gpkit.VectorVariable(2, "x")
z = gpkit.VectorVariable(7, "z")
self.assertEqual(y.shape, (5, 3))
self.assertEqual(x.shape, (2, 5, 3))
self.assertEqual(z.shape, (7, 3))
TESTS = [TestVarKey, TestVariable, TestVectorVariable, TestArrayVariable,
TestVectorize]
if __name__ == '__main__':
# pylint: disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
|
mit
| 2,720,635,913,569,846,000
| 33.863014
| 80
| 0.568173
| false
| 3.486301
| true
| false
| false
|
plang85/rough_surfaces
|
rough_surfaces/plot.py
|
1
|
2368
|
import numpy as np
from matplotlib import rcParams
import scipy.stats as scst
# TODO get rid of these and we won't need matplotlib in the setup, only for examples
rcParams['font.size'] = 14
rcParams['legend.fontsize'] = 10
rcParams['savefig.dpi'] = 300
rcParams['legend.loc'] = 'upper right'
rcParams['image.cmap'] = 'hot'
def roughness_spectrum(ax, q, C, length_unit, onedim=False):
ax.loglog(q, C)
ax.set_xlabel('q (' + length_unit + '$^{-1}$' + ')')
ax.set_ylabel('C (' + length_unit + '$^{4}$' + ')')
if onedim:
ax.set_ylabel('C (' + length_unit + '$^{3}$' + ')')
def roughness(ax, h, dxy, length_unit):
N = h.shape[0]
L = dxy * N
x = np.linspace(-L / 2.0, L / 2.0, N)
XX, YY = np.meshgrid(x, x)
ax.pcolor(XX, YY, h)
ax.axis('equal')
unit_den = ''.join(['(', length_unit, ')'])
ax.set_xlabel('x ' + unit_den)
ax.set_ylabel('y ' + unit_den)
def roughness_histogram(ax, h, length_unit):
bins = 30
ax.hist(h.flatten(), bins, normed=1, color='gray', ec='white')
hsigma = np.std(h)
hspace = np.linspace(h.min(), h.max(), 100)
ax.plot(hspace, scst.norm.pdf(hspace, np.mean(h), hsigma), lw=3, alpha=0.5)
unit_den = ''.join(['(', length_unit, ')'])
ax.set_xlabel('Surface Height ' + unit_den)
ax.set_ylabel('Relative Probability')
def trace(surface, index, axis=0):
if axis == 0:
return surface[index, :]
elif axis == 1:
return surface[:, index]
else:
raise ValueError('axis must be 0(x) or 1(y)')
def traces(ax, surface, displacements=[], index=None, axis=0):
if not index:
index = int(surface.shape[axis] / 2)
surface_trace = trace(surface, index, axis)
ax.plot(surface_trace, label='rigid surface')
if displacements:
for displacement in displacements:
shifted_displacement = displacement - (np.max(displacement) - np.max(surface))
ax.plot(trace(shifted_displacement, index, axis), label='elastic body')
def slope_histogram(ax, h):
bins = 30
g = np.gradient(h)
ax.hist(np.ravel(g), bins, normed=1, color='gray', ec='white')
# hsigma = np.std(h)
# hspace = np.linspace(h.min(), h.max(), 100)
# ax.plot(hspace, scst.norm.pdf(hspace, np.mean(h), hsigma), lw=3, alpha=0.5)
ax.set_xlabel('Surface Slope (-)')
ax.set_ylabel('Relative Probability')
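# Illustrative usage sketch (not part of the original module); the surface array,
# grid spacing, and unit below are hypothetical:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   h = np.random.randn(128, 128) * 1.0e-6 # synthetic surface heights
#   fig, (ax1, ax2) = plt.subplots(1, 2)
#   roughness(ax1, h, dxy=1.0e-3, length_unit='m')
#   roughness_histogram(ax2, h, length_unit='m')
#   plt.show()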
|
mit
| 3,680,067,757,627,451,400
| 32.352113
| 90
| 0.602618
| false
| 2.912669
| false
| false
| false
|
vergecurrency/VERGE
|
test/functional/interface_http.py
|
1
|
4779
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (VergeTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #connection must still be open because verged should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
mit
| 6,686,118,359,302,574,000
| 43.25
| 106
| 0.626282
| false
| 3.910802
| true
| false
| false
|
GovReady/govready-q
|
guidedmodules/module_logic.py
|
1
|
95357
|
import uuid
from itertools import groupby
from urllib.parse import urlunparse
from django.conf import settings
from jinja2.sandbox import SandboxedEnvironment
from controls.enums.statements import StatementTypeEnum
from controls.oscal import Catalogs, Catalog
from siteapp.settings import GOVREADY_URL
def get_jinja2_template_vars(template):
from jinja2 import meta, TemplateSyntaxError
env = SandboxedEnvironment()
try:
expr = env.parse(template)
except TemplateSyntaxError as e:
raise Exception("expression {} is invalid: {}".format(template, e))
return set(meta.find_undeclared_variables(expr))
class Jinja2Environment(SandboxedEnvironment):
# A Jinja2 Environment for template and expression execution.
intercepted_binops = frozenset(['+'])
def call_binop(self, context, operator, left, right):
# If the operands are RenderedAnswer instances, then unwrap them
# to the raw Python value before executing the operator.
def unwrap(operand):
if isinstance(operand, RenderedAnswer):
operand = operand.answer
return operand
left = unwrap(left)
right = unwrap(right)
# Example from Jinja2 docs about overriding an operator.
#if operator == '+':
# return self.undefined('the power operator is unavailable')
# Call default operator logic.
return SandboxedEnvironment.call_binop(self, context, operator, left, right)
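# Illustrative note (not part of the original module): intercepting '+' in the
# sandboxed environment above means a template expression such as
# {{ some_numeric_answer + 1 }} operates on the underlying Python value of the
# answer rather than on the RenderedAnswer wrapper, so numeric answers add
# numerically instead of concatenating their string renderings. The variable name
# in the example is hypothetical.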
def walk_module_questions(module, callback):
# Walks the questions in depth-first order following the dependency
# tree connecting questions. If a question is a dependency of multiple
# questions, it is walked only once.
#
# callback is called for each question in the module with these arguments:
#
# 1) A ModuleQuestion, after the callback has been called for all
# ModuleQuestions that the question depends on.
# 2) A dictionary that has been merged from the return value of all
# of the callback calls on its dependencies.
# 3) A set of ModuleQuestion instances that this question depends on,
# so that the callback doesn't have to compute it (again) itself.
# Remember each question that is processed so we only process each
# question at most once. Cache the state that it gives.
processed_questions = { }
# Pre-load all of the dependencies between questions in this module
# and get the questions that are not depended on by any question,
# which is where the dependency chains start.
dependencies, root_questions = get_all_question_dependencies(module)
# Local function that processes a single question.
def walk_question(q, stack):
# If we've seen this question already as a dependency of another
# question, then return its state dict from last time.
if q.key in processed_questions:
return processed_questions[q.key]
# Prevent infinite recursion.
if q.key in stack:
raise ValueError("Cyclical dependency in questions: " + "->".join(stack + [q.key]))
# Execute recursively on the questions it depends on,
# in module definition order rather than in a random
# order.
state = { }
deps = list(dependencies[q])
deps.sort(key = lambda q : q.definition_order)
for qq in deps:
state.update(walk_question(qq, stack+[q.key]))
# Run the callback and get its state.
state = callback(q, state, dependencies[q])
# Remember the state in case we encounter it later.
processed_questions[q.key] = dict(state) # clone
# Return the state to the caller.
return state
# Walk the dependency root questions in document order.
root_questions = list(root_questions)
root_questions.sort(key = lambda q : q.definition_order)
for q in root_questions:
walk_question(q, [])
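# Illustrative sketch (not part of the original module) of the callback contract
# described above; the module and callback below are hypothetical and only record
# traversal order:
#
#   visited = []
#   def record_order(question, state, dependencies):
#       # 'state' is already merged from the callbacks of every dependency,
#       # so the callback only adds its own key and returns the result.
#       visited.append(question.key)
#       return dict(state, **{question.key: True})
#
#   walk_module_questions(module, record_order)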
def evaluate_module_state(current_answers, parent_context=None):
# Compute the next question to ask the user, given the user's
# answers to questions so far, and all imputed answers up to
# that point.
#
# To figure this out, we walk the dependency tree of questions
# until we arrive at questions that have no unanswered dependencies.
# Such questions can be put forth to the user.
# Build a list of ModuleQuestions that are not unanswerable
# because they are imputed or unavailable to the user. These
# questions may have no answer or can be updated with a new answer.
answerable = set()
# Build a list of ModuleQuestions that the user may answer now
# excluding questions that have already been answered.
can_answer = set()
# Build a list of ModuleQuestions that still need an answer,
# including can_answer and unanswered ModuleQuestions that
# have dependencies that are unanswered and need to be answered
# first before the questions in this list can be answered.
unanswered = set()
# Build a new array of answer values.
from collections import OrderedDict
answertuples = OrderedDict()
# Build a list of questions whose answers were imputed.
was_imputed = set()
# Create some reusable context for evaluating impute conditions --- really only
# so that we can pass down project and organization values. Everything else is
# cleared from the context's cache for each question because each question sees
# a different set of dependencies.
impute_context_parent = TemplateContext(
ModuleAnswers(current_answers.module, current_answers.task, {}), lambda _0, _1, _2, _3, value : str(value), # escapefunc
parent_context=parent_context)
# Visitor function.
def walker(q, state, deps):
# If any of the dependencies don't have answers yet, then this question
# cannot be processed yet.
for qq in deps:
if qq.key not in state:
unanswered.add(q)
answertuples[q.key] = (q, False, None, None)
return { }
# Can this question's answer be imputed from answers that
# it depends on? If the user answered this question (during
# a state in which it wasn't imputed, but now it is), the
# user's answer is overridden with the imputed value for
# consistency with the Module's logic.
# Before running impute conditions below, we need a TemplateContext
# which provides the functionality of resolving variables mentioned
# in the impute condition. The TemplateContext that we use here is
# different from the one we normally use to render output documents
# because an impute condition in a question should not be able to see
# the answers to questions that come later in the module. The purpose
# of evaluate_module_state is, in part, to get the answers to questions,
# including imputed answers, and so the answers to later questions are not
# yet known. Therefore, we construct a TemplateContext that only includes
# the answers to questions that we've computed so far.
impute_context = TemplateContext(
ModuleAnswers(current_answers.module, current_answers.task, state),
impute_context_parent.escapefunc, parent_context=impute_context_parent, root=True)
v = run_impute_conditions(q.spec.get("impute", []), impute_context)
if v:
# An impute condition matched. Unwrap to get the value.
answerobj = None
v = v[0]
was_imputed.add(q.key)
elif q.key in current_answers.as_dict():
# The user has provided an answer to this question. Question can be updated.
answerobj = current_answers.get(q.key)
v = current_answers.as_dict()[q.key]
answerable.add(q)
elif current_answers.module.spec.get("type") == "project" and q.key == "_introduction":
# Projects have an introduction but it isn't displayed as a question.
# It's not explicitly answered, but treat it as answered so that questions
# that implicitly depend on it can be evaluated.
# TODO: Is this still necessary?
answerobj = None
v = None
else:
# This question does not have an answer yet. We don't set
# anything in the state that we return, which flags that
# the question is not answered.
#
# But we can remember that this question *can* be answered
# by the user, and that it's not answered yet.
answerable.add(q)
can_answer.add(q)
unanswered.add(q)
answertuples[q.key] = (q, False, None, None)
return state
# Update the state that's passed to questions that depend on this
# and also the global state of all answered questions.
state[q.key] = (q, True, answerobj, v)
answertuples[q.key] = (q, True, answerobj, v)
return state
# Walk the dependency tree.
walk_module_questions(current_answers.module, walker)
# There may be multiple routes through the tree of questions,
# so we'll prefer the question that is defined first in the spec.
can_answer = sorted(can_answer, key = lambda q : q.definition_order)
# The list of unanswered questions should be in the same order as
# can_answer so that as the user goes through the questions they
# are following the same order as the list of upcoming questions.
# Ideally we'd form both can_answer and unanswered in the same way
# in the right order without needing to sort later, but until then
# we'll just sort both.
unanswered = sorted(unanswered, key = lambda q : q.definition_order)
# Create a new ModuleAnswers object that holds the user answers,
# imputed answers (which override user answers), and next-question
# information.
ret = ModuleAnswers(current_answers.module, current_answers.task, answertuples)
ret.was_imputed = was_imputed
ret.unanswered = unanswered
ret.can_answer = can_answer
ret.answerable = answerable
return ret
def get_question_context(answers, question):
# What is the context of questions around the given question to show
# the user their progress through the questions?
# Create an object to lazy-render values, since we only use it on
# the module-finished page and not to display context on question
# pages.
from guidedmodules.module_logic import TemplateContext, RenderedAnswer, HtmlAnswerRenderer
class LazyRenderedAnswer:
def __init__(self, q, is_answered, answer_obj, answer_value):
self.q = q
self.is_answered = is_answered
self.answer_obj = answer_obj
self.answer_value = answer_value
def __call__(self):
if not self.is_answered:
return "<i>not answered</i>"
if self.q.spec["type"] == "interstitial":
return "<i>seen</i>"
if self.answer_value is None:
return "<i>skipped</i>"
if not hasattr(LazyRenderedAnswer, 'tc'):
LazyRenderedAnswer.tc = TemplateContext(answers, HtmlAnswerRenderer(show_metadata=False))
return RenderedAnswer(answers.task, self.q, self.is_answered, self.answer_obj, self.answer_value, LazyRenderedAnswer.tc).__html__()
answers.as_dict() # force lazy-load
context = []
for q, is_answered, answer_obj, answer_value in answers.answertuples.values():
# Sometimes we want to skip imputed questions, but for the sake
# of the authoring tool we need to keep imputed questions so
# the user can navigate to them.
context.append({
"key": q.key,
"title": q.spec['title'],
"link": answers.task.get_absolute_url_to_question(q),
# Any question that has been answered or can be answered next can be linked to,
"can_link": (answer_obj or q in answers.can_answer),
"imputed": is_answered and answer_obj is None,
"skipped": (answer_obj is not None and answer_value is None) and (q.spec["type"] != "interstitial"),
"answered": answer_obj is not None,
"reviewed": answer_obj.reviewed if answer_obj is not None else None,
"is_this_question": (question is not None) and (q.key == question.key),
"value": LazyRenderedAnswer(q, is_answered, answer_obj, answer_value),
"definition_order": q.definition_order
})
# Sort list of questions by definition_order
from operator import itemgetter
context_sorted = sorted(context, key=itemgetter('definition_order'))
return context_sorted
def oscal_context(answers):
"""
Generate a dictionary of values useful for rendering OSCAL.
Lots of work in progress here!
"""
# sometimes we run into answers w/o a task, in which case
# there is not much we can do
if not hasattr(answers, 'task'):
return dict()
project = answers.task.project
system = project.system
# TODO: where do we get the catalog key from?
catalog_key = Catalogs.NIST_SP_800_53_rev4
catalog = Catalog.GetInstance(catalog_key)
# build a component from an Element
def _component(e):
return {
'uuid': e.uuid,
'title': e.name,
'description': e.description,
'state': e.component_state,
'type': e.component_type
}
components = [_component(e) for e in system.producer_elements]
# collect all the control implementation statements
statements = system.root_element.statements_consumed \
.filter(statement_type=StatementTypeEnum.CONTROL_IMPLEMENTATION.name) \
.order_by('sid')
# and all the project's organizational parameters
params = project.get_parameter_values(catalog_key)
# loop over all statements, grouped by control id and
# build a list of implemented_requirements
implemented_requirements = []
for control_id, group in groupby(statements, lambda s: s.sid):
ir = {
"control_id": control_id,
"uuid": str(uuid.uuid4()),
"statements": []
}
param_ids = catalog.get_parameter_ids_for_control(control_id)
ir["parameter_settings"] = [
dict(param_id=param_id, value=params.get(param_id))
for param_id in param_ids
if params.get(param_id)
]
# loop over all the statements for this control, grouped by
# "part id". I.e., "ac-1.a", "ac-1.b", etc.
for pid, group in groupby(sorted(group, key=lambda s: s.pid),
lambda s: s.pid):
# useful to extract the statement id from the first statement
# (should be the same for all the statements in this group)
group = list(group)
first_statement = group[0]
statement = {
"id": first_statement.oscal_statement_id,
"uuid": str(uuid.uuid4()),
"by_components": []
}
# assumption: at this point, each statement in the group
# has been contributed by a different component. if
# assumption is not valid, we'll have to fix this code a
# bit, since OSCAL doesn't obviously support multiple
# contributions to a statement from the same component
for s in group:
by_component = {
"uuid": str(s.uuid),
"component_uuid": s.producer_element.uuid,
"description": s.body
}
statement["by_components"].append(by_component)
ir['statements'].append(statement)
implemented_requirements.append(ir)
# TODO: placeholder for information types -- should be able to pull this out
# from questionnaire
security_body = project.system.get_security_impact_level
confidentiality = security_body.get("security_objective_confidentiality", "UNKNOWN")
integrity = security_body.get("security_objective_integrity", "UNKNOWN")
availability = security_body.get("security_objective_availability", "UNKNOWN")
information_types = [
{
"uuid": str(uuid.uuid4()),
"title": "UNKNOWN information type title",
# "categorizations": [], # TODO https://doi.org/10.6028/NIST.SP.800-60v2r1
"description": "information type description",
"confidentiality_impact": confidentiality,
"integrity_impact": integrity,
"availability_impact": availability
}
]
# generate a URL to reference this system's OSCAL profile (baseline)
# TODO: fix url pattern matching for backward compatibility, figure out profile usage
# profile_path = reverse('profile_oscal_json', kwargs=dict(system_id=system.id))
profile = urlunparse((GOVREADY_URL.scheme, GOVREADY_URL.netloc,
"profile_path",
None, None, None))
return {
"uuid": str(uuid.uuid4()), # SSP UUID
"make_uuid": uuid.uuid4, # so we can gen UUIDS if needed in the templates
"version": float(project.version),
"profile": profile,
"oscal_version": "1.0.0rc1",
"last_modified": str(project.updated),
"system_id": f"govready-{system.id}",
"system_authorization_boundary": "System authorization boundary, TBD", # TODO
"system_security_impact_level_confidentiality":confidentiality,
"system_security_impact_level_integrity": integrity,
"system_security_impact_level_availability": availability,
"system_operating_status": "operational", # TODO: need from questionnaire, but wrong format
"components": components,
"implemented_requirements": implemented_requirements,
"information_types": information_types
}
def render_content(content, answers, output_format, source,
additional_context={}, demote_headings=True,
show_answer_metadata=False, use_data_urls=False,
is_computing_title=False):
# Renders content (which is a dict with keys "format" and "template")
# into the requested output format, using the ModuleAnswers in answers
# to provide the template context.
# Get the template.
template_format = content["format"]
template_body = content["template"]
# Markdown cannot be used with Jinja2 because auto-escaping is highly
# context dependent. For instance, Markdown can have HTML literal blocks
# and in those blocks the usual backslash-escaping is replaced with
# HTML's usual &-escaping. Also, when a template value is substituted
# inside a Markdown block like a blockquote, newlines in the substituted
# value will break the logical structure of the template without some
# very complex handling of adding line-initial whitespace and block
# markers ("*", ">").
#
# So we must convert Markdown to another format prior to running templates.
#
# If the output format is HTML, convert the Markdown to HTML.
#
# If the output format is plain-text, treat the Markdown as if it is plain text.
#
# No other output formats are supported.
if template_format == "markdown":
if output_format == "html" or output_format == "PARSE_ONLY":
# Convert the template first to HTML using CommonMark.
if not isinstance(template_body, str): raise ValueError("Template %s has incorrect type: %s" % (source, type(template_body)))
# We don't want CommonMark to mess up template tags, however. If
# there are symbols which have meaning both to Jinja2 and CommonMark,
# then they may get ruined by CommonMark because they may be escaped.
# For instance:
#
# {% hello "*my friend*" %}
#
# would become
#
# {% hello "<em>my friend</em>" %}
#
# and
#
# [my link]({{variable_holding_url}})
#
# would become a link whose target is
#
# %7B%7Bvariable_holding_url%7D%7D
#
# And that's not good!
#
# Do a simple lexical pass over the template and replace template
# tags with special codes that CommonMark will ignore. Then we'll
# put back the strings after the CommonMark has been rendered into
# HTML, so that the template tags end up in their appropriate place.
#
# Since CommonMark will clean up Unicode in URLs, e.g. in link and
# image URLs, by %-encoding non-URL-safe characters, we have to
# also override CommonMark's URL escaping function at
# https://github.com/rtfd/CommonMark-py/blob/master/CommonMark/common.py#L71
# to not %-encode our special codes. Unfortunately urllib.parse.quote's
# "safe" argument does not handle non-ASCII characters.
from commonmark import inlines
def urlencode_special(uri):
import urllib.parse
return "".join(
urllib.parse.quote(c, safe="/@:+?=&()%#*,") # this is what CommonMark does
if c not in "\uE000\uE001" else c # but keep our special codes
for c in uri)
inlines.normalize_uri = urlencode_special
substitutions = []
import re
def replace(m):
# Record the substitution.
index = len(substitutions)
substitutions.append(m.group(0))
return "\uE000%d\uE001" % index # use Unicode private use area code points
template_body = re.sub(r"{%[\w\W]*?%}|{{.*?}}", replace, template_body)
# Use our CommonMark Tables parser & renderer.
from commonmark_extensions.tables import \
ParserWithTables as CommonMarkParser, \
RendererWithTables as CommonMarkHtmlRenderer
# Subclass the renderer to control the output a bit.
class q_renderer(CommonMarkHtmlRenderer):
def __init__(self):
# Our module templates are currently trusted, so we can keep
# safe mode off, and we're making use of that. Safe mode is
# off by default, but I'm making it explicit. If we ever
# have untrusted template content, we will need to turn
# safe mode on.
super().__init__(options={ "safe": False })
def heading(self, node, entering):
# Generate <h#> tags with one level down from
# what would be normal since they should not
# conflict with the page <h1>.
if entering and demote_headings:
node.level += 1
super().heading(node, entering)
def code_block(self, node, entering):
# Suppress info strings because with variable substitution
# untrusted content could land in the <code> class attribute
# without a language- prefix.
node.info = None
super().code_block(node, entering)
def make_table_node(self, node):
return "<table class='table'>"
template_format = "html"
template_body = q_renderer().render(CommonMarkParser().parse(template_body))
# Put the Jinja2 template tags back that we removed prior to running
# the CommonMark renderer.
def replace(m):
return substitutions[int(m.group(1))]
template_body = re.sub("\uE000(\d+)\uE001", replace, template_body)
elif output_format in ("text", "markdown"):
# Pass through the markdown markup unchanged.
pass
else:
raise ValueError("Cannot render a markdown template to %s in %s." % (output_format, source))
# Execute the template.
if template_format in ("json", "yaml"):
# The json and yaml template types are not rendered in the usual
# way. The template itself is a Python data structure (not a string).
# We will replace all string values in the data structure (except
# dict keys) with what we get by calling render_content recursively
# on the string value, assuming it is a template of plain-text type.
import re
from collections import OrderedDict
import jinja2
env = Jinja2Environment(
autoescape=True,
undefined=jinja2.StrictUndefined) # see below - we define any undefined variables
context = dict(additional_context) # clone
if answers:
def escapefunc(question, task, has_answer, answerobj, value):
# Don't perform any escaping. The caller will wrap the
# result in jinja2.Markup().
return str(value)
def errorfunc(message, short_message, long_message, **format_vars):
# Wrap in jinja2.Markup to prevent auto-escaping.
return jinja2.Markup("<" + message.format(**format_vars) + ">")
tc = TemplateContext(answers, escapefunc,
root=True,
errorfunc=errorfunc,
source=source,
show_answer_metadata=show_answer_metadata,
is_computing_title=is_computing_title)
context.update(tc)
def walk(value, path, additional_context_2 = {}):
# Render string values through the templating logic.
if isinstance(value, str):
return render_content(
{
"format": "text",
"template": value
},
answers,
"text",
source + " " + "->".join(path),
{ **additional_context, **additional_context_2 }
)
# Process objects with a special "%___" key specially.
# If it has a %for key with a string value, then interpret the string value as
# an expression in Jinja2 which we assume evaluates to a sequence-like object
# and loop over the items in the sequence. For each item, the "%loop" key
# of this object is rendered with the context amended with variable name
# assigned the sequence item.
elif isinstance(value, dict) and isinstance(value.get("%for"), str):
# The value of the "%for" key is "variable in expression". Parse that
# first.
m = re.match(r"^(\w+) in (.*)", value.get("%for"), re.I)
if not m:
raise ValueError("%for directive needs 'variable in expression' value")
varname = m.group(1)
expr = m.group(2)
# print(print"%for: expr = ", expr)
condition_func = compile_jinja2_expression(expr)
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
seq = condition_func(context)
# print("%for: seq = ", seq)
# Render the %loop key for each item in sequence.
return [
walk(
value.get("%loop"),
path+[str(i)],
{ **additional_context_2, **{ varname: item } })
for i, item in enumerate(seq)
]
# For a %dict key, we will add a dictionary for each element in the
# sequence. The key for the dictionary is specified by value of %key
# item, and the value of the item itself is specified by the %value
elif isinstance(value, dict) and isinstance(value.get("%dict"), str):
# The value of the "%dict" key is "variable in expression". Parse that
# first.
m = re.match(r"^(\w+) in (.*)", value.get("%dict"), re.I)
if not m:
raise ValueError("%dict directive needs 'variable in expression' value")
varname = m.group(1)
expr = m.group(2)
condition_func = compile_jinja2_expression(expr)
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
seq = condition_func(context)
# Render the %value key for each item in sequence,
# producing a dict of dicts. Each rendered dict
# must contain a special item with the key "%key".
# The value of "%key" is used to key a dictionary
# containing the remainder of the rendered items.
# E.g.,
# {
# "books": {
# "%dict": "book in books",
# "%value": {
# "%key": "{{ book.id }}",
# "title": "{{ book.title }}",
# "author": "{{ book.author }}"
# }
# }
# }
# will render to:
# {
# "books": {
# "100": {
# "title": "Harry Potter and the Chamber of Secrets",
# "author": "JK"
# },
# "101": {
# "title": "Harry Potter and the Goblet of Fire",
# "author": "JK"
# }
# }
# }
retval = dict()
if "%value" not in value:
raise ValueError("%dict directive missing %value")
item_value = value["%value"]
for i, item in enumerate(seq):
obj = walk(
item_value,
path+[str(i)],
{ **additional_context_2, **{ varname: item } })
if not isinstance(obj, dict):
raise ValueError("%value did not produce a dict")
if "%key" not in obj:
raise ValueError("dict returned by %value had no %key")
dict_key = obj.pop('%key')
retval[dict_key] = obj
return retval
elif isinstance(value, dict) and isinstance(value.get("%if"), str):
# The value of the "%if" key is an expression.
condition_func = compile_jinja2_expression(value["%if"])
if output_format == "PARSE_ONLY":
return value
# Evaluate the expression.
context.update(additional_context_2)
test = condition_func(context)
# If the expression is true, then we render the "%then" key.
if test:
return walk(
value.get("%then"),
path+["%then"],
additional_context_2)
else:
return None
# All other JSON data passes through unchanged.
elif isinstance(value, list):
# Recursively enter each value in the list and re-assemble a new list with
# the return value of this function on each item in the list.
return [
walk(i, path+[str(i)], additional_context_2)
for i in value]
elif isinstance(value, dict):
# Recursively enter each value in each key-value pair in the JSON object.
# Return a new JSON object with the same keys but with the return value
# of this function applied to the value.
return OrderedDict([
( k,
walk(v, path+[k], additional_context_2)
)
for k, v in value.items()
])
else:
# Leave unchanged.
return value
# Render the template. Recursively walk the JSON data structure and apply the walk()
# function to each value in it.
oscal = oscal_context(answers)
value = walk(template_body, [], dict(oscal=oscal) if oscal else {})
# If we're just testing parsing the template, return
# any output now. Since the inner templates may have
# returned a value of any type, we can't serialize back to
# JSON --- pyyaml's safe dumper will raise an error if
# it gets a non-safe value type.
if output_format == "PARSE_ONLY":
return value
# Render to JSON or YAML depending on what was specified on the
# template.
if template_format == "json":
import json
output = json.dumps(value, indent=True)
elif template_format == "yaml":
import rtyaml
output = rtyaml.dump(value)
if output_format == "html":
# Convert to HTML.
import html
return "<pre>" + html.escape(output) + "</pre>"
elif output_format == "text":
# Treat as plain text.
return output
elif output_format == "PARSE_ONLY":
# For tests, callers can use the "PARSE_ONLY" output format to
# stop after the template is prepared.
return output
else:
raise ValueError("Cannot render %s to %s in %s." % (template_format, output_format, source))
elif template_format in ("text", "markdown", "html", "xml"):
# The plain-text and HTML template types are rendered using Jinja2.
#
# The only difference is in how escaping of substituted variables works.
# For plain-text, there is no escaping. For HTML, we render 'longtext'
# anwers as if the user was typing Markdown. That makes sure that
# paragraphs aren't collapsed in HTML, and gives us other benefits.
# For other values we perform standard HTML escaping.
import jinja2
if template_format in ("text", "markdown", "xml"):
def escapefunc(question, task, has_answer, answerobj, value):
# Don't perform any escaping. The caller will wrap the
# result in jinja2.Markup().
return str(value)
def errorfunc(message, short_message, long_message, **format_vars):
# Wrap in jinja2.Markup to prevent auto-escaping.
return jinja2.Markup("<" + message.format(**format_vars) + ">")
elif template_format == "html":
escapefunc = HtmlAnswerRenderer(show_metadata=show_answer_metadata, use_data_urls=use_data_urls)
def errorfunc(message, short_message, long_message, **format_vars):
if output_format == "html" and show_answer_metadata:
# In HTML outputs with popovers for answer metadata, use a popover
# TODO: Display detailed error info in task-finished.html more explicitly
# renders popovers in templates.
return jinja2.Markup("""
<span class="text-danger"
data-toggle="popover" data-content="{}">
<{}>
</span>
""".format(jinja2.escape(long_message.format(**format_vars)),
jinja2.escape(short_message.format(**format_vars))))
else:
# Simple error message for HTML output when popovers are not
# being used. Auto-escaping will take care of escaping.
return "<{}>".format(message.format(**format_vars))
# Execute the template.
# Evaluate the template. Ensure autoescaping is turned on. Even though
# we handle it ourselves, we do so using the __html__ method on
# RenderedAnswer, which relies on autoescaping logic. This also lets
# the template writer disable autoescaping with "|safe".
env = Jinja2Environment(
autoescape=True,
undefined=jinja2.StrictUndefined) # see below - we define any undefined variables
try:
template = env.from_string(template_body)
except jinja2.TemplateSyntaxError as e:
raise ValueError("There was an error loading the Jinja2 template %s: %s, line %d" % (source, str(e), e.lineno))
# For tests, callers can use the "PARSE_ONLY" output format to
# stop after the template is compiled.
if output_format == "PARSE_ONLY":
return template
# Create an intial context dict with the additional_context provided
# by the caller, add additional context variables and functions, and
# add rendered answers into it.
context = dict(additional_context) # clone
if answers and answers.task:
context['static_asset_path_for'] = answers.task.get_static_asset_url
# Render.
try:
# context.update will immediately load all top-level values, which
# unfortunately might throw an error if something goes wrong
if answers:
tc = TemplateContext(answers, escapefunc,
root=True,
errorfunc=errorfunc,
source=source,
show_answer_metadata=show_answer_metadata,
is_computing_title=is_computing_title)
context.update(tc)
# Define undefined variables. Jinja2 will normally raise an exception
# when an undefined variable is accessed. It can also be set to not
# raise an exception and treat the variables as nulls. As a middle
# ground, we'll render these variables as error messages. This isn't
# great because an undefined variable indicates an incorrectly authored
# template, and rendering the variable might mean no one will notice
# the template is incorrect. But it's probably better UX than having
# a big error message for the output as a whole or silently ignoring it.
for varname in get_jinja2_template_vars(template_body):
context.setdefault(varname, UndefinedReference(varname, errorfunc, [source]))
# Now really render.
output = template.render(context)
except Exception as e:
raise ValueError("There was an error executing the template %s: %s" % (source, str(e)))
# Convert the output to the desired output format.
if template_format == "text":
if output_format == "text":
# text => text (nothing to do)
return output
# TODO: text => markdown
elif output_format == "html":
# convert text to HTML by escaping and wrapping in a <pre> tag
import html
return "<pre>" + html.escape(output) + "</pre>"
elif template_format == "markdown":
if output_format == "text":
# TODO: markdown => text, for now just return the Markdown markup
return output
elif output_format == "markdown":
# markdown => markdown -- nothing to do
return output
elif template_format == "xml":
if output_format == "text":
# TODO: xml => text, for now just return the XML markup
return output
elif output_format == "markdown":
# xml => markdown -- pass the XML markup through unchanged
return output
# markdown => html never occurs because we convert the Markdown to
# HTML earlier and then we see it as html => html.
elif template_format == "html":
if output_format == "html":
# html => html
#
# There is no data transformation, but we must check that no
# unsafe content was inserted by variable substitution ---
# in particular, unsafe URLs like javascript: and data: URLs.
# When the content comes from a Markdown template, unsafe content
# can only end up in <a> href's and <img> src's. If the template
# has unsafe content like raw HTML, then it is up to the template
# writer to ensure that variable substitution does not create
# a vulnerability.
#
# We also rewrite non-absolute URLs in <a> href's and <img> src
# to allow for linking to module-defined static content.
#
# This also fixes the nested <p>'s within <p>'s when a longtext
# field is rendered.
def rewrite_url(url, allow_dataurl=False):
# Rewrite for static assets.
if answers and answers.task:
url = answers.task.get_static_asset_url(url, use_data_urls=use_data_urls)
# Check final URL.
import urllib.parse
u = urllib.parse.urlparse(url)
# Allow data URLs in some cases.
if use_data_urls and allow_dataurl and u.scheme == "data":
return url
if u.scheme not in ("", "http", "https", "mailto"):
return "javascript:alert('Invalid link.');"
return url
import html5lib
dom = html5lib.HTMLParser().parseFragment(output)
for node in dom.iter():
if node.get("href"):
node.set("href", rewrite_url(node.get("href")))
if node.get("src"):
node.set("src", rewrite_url(node.get("src"), allow_dataurl=(node.tag == "{http://www.w3.org/1999/xhtml}img")))
output = html5lib.serialize(dom, quote_attr_values="always", omit_optional_tags=False, alphabetical_attributes=True)
# But the p's within p's fix gives us a lot of empty p's.
output = output.replace("<p></p>", "")
return output
raise ValueError("Cannot render %s to %s." % (template_format, output_format))
else:
raise ValueError("Invalid template format encountered: %s." % template_format)
class HtmlAnswerRenderer:
def __init__(self, show_metadata, use_data_urls=False):
self.show_metadata = show_metadata
self.use_data_urls = use_data_urls
def __call__(self, question, task, has_answer, answerobj, value):
import html
if question is not None and question.spec["type"] == "longtext":
# longtext fields are rendered into the output
# using CommonMark. Escape initial <'s so they
# are not treated as the start of HTML tags,
# which are not permitted in safe mode, but
# <'s appear tag-like in certain cases like
            # when we say <not answered>.
if value.startswith("<"): value = "\\" + value
from commonmark_extensions.tables import \
ParserWithTables as CommonMarkParser, \
RendererWithTables as CommonMarkHtmlRenderer
parsed = CommonMarkParser().parse(value)
value = CommonMarkHtmlRenderer({ "safe": True }).render(parsed)
wrappertag = "div"
elif question is not None and question.spec["type"] == "file" \
and hasattr(value, "file_data"):
# Files turn into link tags, possibly containing a thumbnail
# or the uploaded image itself.
img_url = None
if self.use_data_urls and value.file_data.get("thumbnail_dataurl"):
img_url = value.file_data["thumbnail_dataurl"]
elif self.use_data_urls and value.file_data.get("content_dataurl"):
img_url = value.file_data["content_dataurl"]
elif self.use_data_urls:
img_url = "data:"
elif value.file_data.get("thumbnail_url"):
img_url = value.file_data["thumbnail_url"]
elif question.spec.get("file-type") == "image":
img_url = value.file_data['url']
from jinja2.filters import do_filesizeformat
label = "Download attachment ({format}; {size}; {date})".format(
format=value.file_data["type_display"],
size=do_filesizeformat(value.file_data['size']),
date=answerobj.created.strftime("%x") if answerobj else "",
)
if not img_url:
# no thumbnail
value = """<p><a href="%s">%s</a></p>""" % (
html.escape(value.file_data['url']),
label,
)
else:
# has a thumbnail
# used to have max-height: 100vh; here but wkhtmltopdf understands it as 0px
value = """
<p>
<a href="%s" class="user-media">
<img src="%s" class="img-responsive" style=" border: 1px solid #333; margin-bottom: .25em;">
                            <div style='font-size: 90%%;'>%s</div>
</a>
</p>""" % (
html.escape(value.file_data['url']),
html.escape(img_url or ""),
label,
)
wrappertag = "div"
elif question is not None and question.spec["type"] == "datagrid":
# Assuming that RenderedAnswer gives us string version of the stored datagrid object
# that is an Array of Dictionaries
import ast
try:
# Get datagrid data if datagrid question has been answered with information
datagrid_rows = ast.literal_eval(value)
except:
if value == "<nothing chosen>":
# Datagrid question has been visited and instantiated but no answer given
# No data was entered into data grid
datagrid_rows = []
else:
# Datagrid question has not been visited and not yet instantiated
# `value` is set to "<Software Inventory (datagrid)>"
datagrid_rows = []
if "render" in question.spec and question.spec["render"] == "vertical":
# Build a vertical table to display datagrid information
value = ""
for item in datagrid_rows:
# Start a new table
value += "<table class=\"table\">\n"
# Create a row for each field
for field in question.spec["fields"]:
value += "<tr><td class=\"td_datagrid_vertical\">{}</td><td>{}</td></tr>".format(html.escape(str(field["text"])), html.escape(str(item[field["key"]])))
value += "\n</table>"
else:
# Build a standard table to display datagrid information
value = "<table class=\"table\">\n"
value += "<thead>\n<tr>"
# To get the correct order, get keys from question specification fields
for field in question.spec["fields"]:
value += "<th>{}</th>".format(html.escape(str(field["text"])))
value += "</tr></thead>\n"
for item in datagrid_rows:
value += "<tr>"
# To get the correct order, get keys from question specification fields
for field in question.spec["fields"]:
value += "<td>{}</td>".format(html.escape(str(item[field["key"]])))
value += "</tr>\n"
# value = html.escape(str(datagrid_rows))
value += "\n</table>"
wrappertag = "div"
else:
# Regular text fields just get escaped.
value = html.escape(str(value))
wrappertag = "span"
if (not self.show_metadata) or (question is None):
return value
# Wrap the output in a tag that holds metadata.
# If the question is imputed...
if has_answer and not answerobj:
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='{answer_type}'
{edit_link}
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
answer_type="skipped" if not has_answer else "imputed",
edit_link="",
value=value,
)
# If the question is unanswered...
if not answerobj:
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='{answer_type}'
{edit_link}
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
answer_type="skipped" if not has_answer else "imputed",
edit_link=("data-edit-link='" + task.get_absolute_url_to_question(question) + "'") if task else "",
value=value,
)
# If the question is answered (by a user).
return """<{tag} class='question-answer'
data-module='{module}'
data-question='{question}'
data-answer-type='user-answer'
data-edit-link='{edit_link}'
data-answered-by='{answered_by}'
data-answered-on='{answered_on}'
data-reviewed='{reviewed}'
>{value}</{tag}>""".format(
tag=wrappertag,
module=html.escape(question.module.spec['title']),
question=html.escape(question.spec["title"]),
edit_link=answerobj.taskanswer.get_absolute_url(),
answered_by=html.escape(str(answerobj.answered_by)),
answered_on=html.escape(answerobj.created.strftime("%c")),
reviewed=str(answerobj.reviewed),
value=value,
)
def clear_module_question_cache():
if hasattr(get_all_question_dependencies, 'cache'):
del get_all_question_dependencies.cache
def get_all_question_dependencies(module):
# Initialize cache, query cache.
if not hasattr(get_all_question_dependencies, 'cache'):
get_all_question_dependencies.cache = { }
if module.id in get_all_question_dependencies.cache:
return get_all_question_dependencies.cache[module.id]
# Pre-load all of the questions by their key so that the dependency
# evaluation is fast.
all_questions = { }
for q in module.questions.all():
all_questions[q.key] = q
# Compute all of the dependencies of all of the questions.
dependencies = {
q: get_question_dependencies(q, get_from_question_id=all_questions)
for q in all_questions.values()
}
# Find the questions that are at the root of the dependency tree.
is_dependency_of_something = set()
for deps in dependencies.values():
is_dependency_of_something |= deps
root_questions = { q for q in dependencies if q not in is_dependency_of_something }
ret = (dependencies, root_questions)
# Save to in-memory (in-process) cache. Never in debugging.
if not settings.DEBUG:
get_all_question_dependencies.cache[module.id] = ret
return ret
def get_question_dependencies(question, get_from_question_id=None):
return set(edge[1] for edge in get_question_dependencies_with_type(question, get_from_question_id))
def get_question_dependencies_with_type(question, get_from_question_id=None):
if get_from_question_id is None:
# dict-like interface
class GetFromQuestionId:
def __getitem__(self, qid):
return question.module.questions.filter(key=qid).get()
def __contains__(self, qid):
return question.module.questions.filter(key=qid).exists()
get_from_question_id = GetFromQuestionId()
# Returns a set of ModuleQuestion instances that this question is dependent on
# as a list of edges that are tuples of (edge_type, question obj).
ret = []
# All questions mentioned in prompt text become dependencies.
for qid in get_jinja2_template_vars(question.spec.get("prompt", "")):
ret.append(("prompt", qid))
# All questions mentioned in the impute conditions become dependencies.
# And when impute values are expressions, then similarly for those.
for rule in question.spec.get("impute", []):
if "condition" in rule:
for qid in get_jinja2_template_vars(
r"{% if (" + rule["condition"] + r") %}...{% endif %}"
):
ret.append(("impute-condition", qid))
if rule.get("value-mode") == "expression":
for qid in get_jinja2_template_vars(
r"{% if (" + rule["value"] + r") %}...{% endif %}"
):
ret.append(("impute-value", qid))
if rule.get("value-mode") == "template":
for qid in get_jinja2_template_vars(rule["value"]):
ret.append(("impute-value", qid))
# Other dependencies can just be listed.
for qid in question.spec.get("ask-first", []):
ret.append(("ask-first", qid))
# Turn IDs into ModuleQuestion instances.
return [ (edge_type, get_from_question_id[qid])
for (edge_type, qid) in ret
if qid in get_from_question_id
]
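# Illustrative sketch (not part of the original source; the question id is hypothetical):
# a question whose prompt contains "{{ other_question }}" contributes the edge
# ("prompt", <ModuleQuestion other_question>), and an impute rule with
# "condition: other_question == 'yes'" contributes ("impute-condition", <ModuleQuestion other_question>).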
jinja2_expression_compile_cache = { }
def compile_jinja2_expression(expr):
# If the expression has already been compiled and is in the cache,
# return the compiled expression.
if expr in jinja2_expression_compile_cache:
return jinja2_expression_compile_cache[expr]
# The expression is not in the cache. Compile it.
env = Jinja2Environment()
compiled = env.compile_expression(expr)
# Save it to the cache.
jinja2_expression_compile_cache[expr] = compiled
# Return it.
return compiled
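# Minimal usage sketch (hypothetical expression and context, not from the original source):
#     is_cloud = compile_jinja2_expression("hosting == 'cloud'")
#     is_cloud({"hosting": "cloud"})   # -> True; the compiled callable is reused on later calls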
def run_impute_conditions(conditions, context):
# Check if any of the impute conditions are met based on
# the questions that have been answered so far and return
# the imputed value. Be careful about values like 0 that
# are false-y --- must check for "is None" to know if
# something was imputed or not.
env = Jinja2Environment()
for rule in conditions:
if "condition" in rule:
condition_func = compile_jinja2_expression(rule["condition"])
try:
value = condition_func(context)
except:
value = None
else:
value = True
if value:
# The condition is met. Compute the imputed value.
if rule.get("value-mode", "raw") == "raw":
# Imputed value is the raw YAML value.
value = rule["value"]
elif rule.get("value-mode", "raw") == "expression":
value = compile_jinja2_expression(rule["value"])(context)
if isinstance(value, RenderedAnswer):
# Unwrap.
value = value.answer
elif hasattr(value, "__html__"):
# some things might return something that safely wraps a string,
# like our SafeString instance
value = value.__html__()
elif hasattr(value, "as_raw_value"):
# RenderedProject, RenderedOrganization
value = value.as_raw_value()
elif rule.get("value-mode", "raw") == "template":
env = Jinja2Environment(autoescape=True)
try:
template = env.from_string(rule["value"])
except jinja2.TemplateSyntaxError as e:
raise ValueError("There was an error loading the template %s: %s" % (rule["value"], str(e)))
value = template.render(context)
else:
raise ValueError("Invalid impute condition value-mode.")
# Since the imputed value may be None, return
# the whole thing in a tuple to distinguish from
# a None indicating the lack of an imputed value.
return (value,)
return None
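# Example of the YAML-style rules this function expects (hypothetical field names):
#     impute:
#       - condition: hosting == 'onprem'
#         value: not-applicable
# With context {"hosting": "onprem"}, run_impute_conditions(rules, context) returns
# ('not-applicable',); if no rule's condition evaluates truthy it returns None.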
def get_question_choice(question, key):
for choice in question.spec["choices"]:
if choice["key"] == key:
return choice
raise KeyError(repr(key) + " is not a choice")
class ModuleAnswers(object):
"""Represents a set of answers to a Task."""
def __init__(self, module, task, answertuples):
self.module = module
self.task = task
self.answertuples = answertuples
self.answers_dict = None
def __str__(self):
return "<ModuleAnswers for %s - %s>" % (self.module, self.task)
def as_dict(self):
if self.answertuples is None:
# Lazy-load by calling the task's get_answers function
# and copying its answers dictionary.
if self.task is None:
self.answertuples = { q.key: (q, False, None, None) for q in sorted(self.module.questions.all(), key = lambda q : q.definition_order) }
else:
self.answertuples = self.task.get_answers().answertuples
if self.answers_dict is None:
self.answers_dict = { q.key: value for q, is_ans, ansobj, value in self.answertuples.values() if is_ans }
return self.answers_dict
def with_extended_info(self, parent_context=None):
# Return a new ModuleAnswers instance that has imputed values added
# and information about the next question(s) and unanswered questions.
return evaluate_module_state(self, parent_context=parent_context)
def get(self, question_key):
return self.answertuples[question_key][2]
def get_questions(self):
self.as_dict() # lazy load if necessary
return [v[0] for v in self.answertuples.values()]
def render_answers(self, show_unanswered=True, show_imputed=True, show_imputed_nulls=True, show_metadata=False):
# Return a generator that provides tuples of
# (question, answerobj, answerhtml) where
# * question is a ModuleQuestion instance
# * answerobj is a TaskAnswerHistory instance (e.g. holding user and review state), or None if the answer was skipped or imputed
# * answerhtml is a str of rendered HTML
tc = TemplateContext(self, HtmlAnswerRenderer(show_metadata=show_metadata))
for q, is_answered, a, value in self.answertuples.values():
if not is_answered and not show_unanswered: continue # skip questions that have no answers
if not a and not show_imputed: continue # skip imputed answers
if not a and value is None and not show_imputed_nulls: continue # skip questions whose imputed value is null
if q.spec["type"] == "interstitial": continue # skip question types that display awkwardly
if value is None:
# Question is skipped.
                if a and a.skipped_reason:
value_display = "<i>{}</i>".format( a.get_skipped_reason_display() )
else:
value_display = "<i>skipped</i>"
else:
# Use the template rendering system to produce a human-readable
# HTML rendering of the value.
value_display = RenderedAnswer(self.task, q, is_answered, a, value, tc)
# For question types whose primary value is machine-readable,
# show a nice display form if possible using the .text attribute,
# if possible. It probably returns a SafeString which needs __html__()
# to be called on it. "file" questions render nicer without .text.
if q.spec["type"] not in ("file",):
try:
value_display = value_display.text
except AttributeError:
pass
# Whether or not we called .text, call __html__() to get
# a rendered form.
if hasattr(value_display, "__html__"):
value_display = value_display.__html__()
yield (q, a, value_display)
def render_output(self, use_data_urls=False):
# Now that all questions have been answered, generate this
# module's output. The output is a set of documents. The
# documents are lazy-rendered because not all of them may
# be used by the caller.
class LazyRenderedDocument(object):
output_formats = ("html", "text", "markdown")
def __init__(self, module_answers, document, index, use_data_urls):
self.module_answers = module_answers
self.document = document
self.index = index
self.rendered_content = { }
self.use_data_urls = use_data_urls
def __iter__(self):
# Yield all of the keys (entry) that are in the output document
# specification, plus all of the output formats which are
# keys (entry) in our returned dict that lazily render the document.
for entry, value in self.document.items():
if entry not in self.output_formats:
yield entry
for entry in self.output_formats:
yield entry
def __getitem__(self, entry):
if entry in self.output_formats:
# entry is an output format -> lazy render.
if entry not in self.rendered_content:
# Cache miss.
# For errors, what is the name of this document?
if "id" in self.document:
doc_name = self.document["id"]
else:
doc_name = "at index " + str(self.index)
if "title" in self.document:
doc_name = repr(self.document["title"]) + " (" + doc_name + ")"
doc_name = "'%s' output document '%s'" % (self.module_answers.module.module_name, doc_name)
# Try to render it.
task_cache_entry = "output_r1_{}_{}_{}".format(
self.index,
entry,
1 if self.use_data_urls else 0,
)
def do_render():
try:
return render_content(self.document, self.module_answers, entry, doc_name, show_answer_metadata=True, use_data_urls=self.use_data_urls)
except Exception as e:
# Put errors into the output. Errors should not occur if the
# template is designed correctly.
ret = str(e)
if entry == "html":
import html
ret = "<p class=text-danger>" + html.escape(ret) + "</p>"
return ret
self.rendered_content[entry] = self.module_answers.task._get_cached_state(task_cache_entry, do_render)
return self.rendered_content[entry]
elif entry in self.document:
# entry is a entry in the specification for the document.
# Return it unchanged.
return self.document[entry]
raise KeyError(entry)
def get(self, entry, default=None):
if entry in self.output_formats or entry in self.document:
                    return self[entry]
                return default
return [ LazyRenderedDocument(self, d, i, use_data_urls) for i, d in enumerate(self.module.spec.get("output", [])) ]
class UndefinedReference:
def __init__(self, varname, errorfunc, path=[]):
self.varname = varname
self.errorfunc = errorfunc
self.path = path
def __html__(self):
return self.errorfunc(
"invalid reference to '{varname}' in {source}",
"invalid reference",
"Invalid reference to variable '{varname}' in {source}.",
varname=self.varname,
source=" -> ".join(self.path),
)
def __getitem__(self, item):
return UndefinedReference(item, self.errorfunc, self.path+[self.varname])
from collections.abc import Mapping
class TemplateContext(Mapping):
"""A Jinja2 execution context that wraps the Pythonic answers to questions
of a ModuleAnswers instance in RenderedAnswer instances that provide
template and expression functionality like the '.' accessor to get to
the answers of a sub-task."""
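    # Illustrative template usage (the question ids are hypothetical, not from the original
    # source): "{{ q_hosting }}" resolves through __getitem__ to a RenderedAnswer, and
    # "{{ q_hosting.provider }}" descends into the answered sub-task's context when
    # q_hosting is a module-type question.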
def __init__(self, module_answers, escapefunc, parent_context=None, root=False, errorfunc=None, source=None, show_answer_metadata=None, is_computing_title=False):
self.module_answers = module_answers
self.escapefunc = escapefunc
self.root = root
self.errorfunc = parent_context.errorfunc if parent_context else errorfunc
self.source = (parent_context.source if parent_context else []) + ([source] if source else [])
self.show_answer_metadata = parent_context.show_answer_metadata if parent_context else (show_answer_metadata or False)
self.is_computing_title = parent_context.is_computing_title if parent_context else is_computing_title
self._cache = { }
self.parent_context = parent_context
def __str__(self):
return "<TemplateContext for %s>" % (self.module_answers)
def __getitem__(self, item):
# Cache every context variable's value, since some items are expensive.
if item not in self._cache:
self._cache[item] = self.getitem(item)
return self._cache[item]
def _execute_lazy_module_answers(self):
if self.module_answers is None:
# This is a TemplateContext for an unanswered question with an unknown
# module type. We treat this as if it were a Task that had no questions but
# also is not finished.
self._module_questions = { }
return
if callable(self.module_answers):
self.module_answers = self.module_answers()
self._module_questions = { q.key: q for q in self.module_answers.get_questions() }
def getitem(self, item):
self._execute_lazy_module_answers()
# If 'item' matches a question ID, wrap the internal Pythonic/JSON-able value
# with a RenderedAnswer instance which take care of converting raw data values
# into how they are rendered in templates (escaping, iteration, property accessors)
# and evaluated in expressions.
question = self._module_questions.get(item)
if question:
# The question might or might not be answered. If not, its value is None.
self.module_answers.as_dict() # trigger lazy-loading
_, is_answered, answerobj, answervalue = self.module_answers.answertuples.get(item, (None, None, None, None))
return RenderedAnswer(self.module_answers.task, question, is_answered, answerobj, answervalue, self)
# The context also provides the project and organization that the Task belongs to,
# and other task attributes, assuming the keys are not overridden by question IDs.
if self.module_answers and self.module_answers.task:
if item == "title" and (not self.is_computing_title or not self.root):
return self.module_answers.task.title
if item == "task_link":
return self.module_answers.task.get_absolute_url()
if item == "project":
if self.parent_context is not None: # use parent's cache
return self.parent_context[item]
return RenderedProject(self.module_answers.task.project, parent_context=self)
if item == "organization":
if self.parent_context is not None: # use parent's cache
return self.parent_context[item]
return RenderedOrganization(self.module_answers.task, parent_context=self)
if item == "control_catalog":
# Retrieve control catalog(s) for project
# Temporarily retrieve a single catalog
# TODO: Retrieve multiple catalogs because we could have catalogs plus overlays
# Will need a better way to determine the catalogs on a system so we can retrieve at once
# Maybe get the catalogs as a property of the system
# Retrieve a Django dictionary of dictionaries object of full control catalog
from controls.oscal import Catalog
try:
all_keys = list(set([controls.oscal_catalog_key for controls in
self.module_answers.task.project.system.root_element.controls.all()]))
except:
all_keys = []
# Need default if there are no control catalogs present
control_catalog = []
# If there are multiple catalogs
if len(all_keys) > 1:
for idx, key in enumerate(all_keys):
# Detect single control catalog from first control
try:
parameter_values = self.module_answers.task.project.get_parameter_values(key)
sca = Catalog.GetInstance(catalog_key=key,
parameter_values=parameter_values)
control_catalog.append(sca.flattened_controls_all_as_dict_list)
except:
control_catalog = None
# If there is one catalog
elif len(all_keys) == 1:
try:
parameter_values = self.module_answers.task.project.get_parameter_values(all_keys[0])
sca = Catalog.GetInstance(catalog_key=all_keys[0],
parameter_values=parameter_values)
control_catalog = sca.flattened_controls_all_as_dict
except:
control_catalog = None
return control_catalog
if item == "system":
# Retrieve the system object associated with this project
# Returned value must be a python dictionary
return self.module_answers.task.project.system
if item == "oscal":
return oscal_context(self.module_answers.task.project.system)
if item in ("is_started", "is_finished"):
# These are methods on the Task instance. Don't
# call the method here because that leads to infinite
# recursion. Figuring out if a module is finished
# requires imputing all question answers, which calls
# into templates, and we can end up back here.
return getattr(self.module_answers.task, item)
else:
# If there is no Task associated with this context, then we're
# faking the attributes.
if item in ("is_started", "is_finished"):
return (lambda : False) # the attribute normally returns a bound function
# The 'questions' key returns (question, answer) pairs.
if item == "questions":
if self.module_answers is None:
return []
self.module_answers.as_dict() # trigger lazy-loading
ret = []
for question, is_answered, answerobj, answervalue in self.module_answers.answertuples.values():
ret.append((
question.spec,
RenderedAnswer(self.module_answers.task, question, is_answered, answerobj, answervalue, self)
))
return ret
# The output_documents key returns the output documents as a dict-like mapping
# from IDs to rendered content.
if item == "output_documents":
return TemplateContext.LazyOutputDocuments(self)
# The item is not something found in the context.
error_message = "'{item}' is not a question or property of '{object}'."
error_message_vars = { "item": item, "object": (self.module_answers.task.title if self.module_answers and self.module_answers.task else self.module_answers.module.spec["title"]) }
if self.errorfunc:
return UndefinedReference(item, self.errorfunc, self.source + ["(" + error_message_vars["object"] + ")"])
raise AttributeError(error_message.format(**error_message_vars))
def __iter__(self):
self._execute_lazy_module_answers()
seen_keys = set()
# question names
for q in self._module_questions.values():
seen_keys.add(q.key)
yield q.key
# special values
# List the name of variables that are available in the templatecontext `getitem`
if self.module_answers and self.module_answers.task:
# Attributes that are only available if there is a task.
if not self.is_computing_title or not self.root:
# 'title' isn't available if we're in the process of
# computing it
yield "title"
for attribute in ("task_link", "project", "organization", "control_catalog", "system"):
if attribute not in seen_keys:
yield attribute
# Attributes that are available even when peering into unanswered module-type questions.
for attribute in ("is_started", "is_finished", "questions", "output_documents"):
if attribute not in seen_keys:
yield attribute
def __len__(self):
return len([x for x in self])
# Class that lazy-renders output documents on request.
class LazyOutputDocuments:
def __init__(self, context):
self.context = context
def __getattr__(self, item):
try:
# Find the requested output document in the module.
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id") == item:
# Render it.
content = render_content(doc, self.context.module_answers, "html",
"'%s' output document '%s'" % (repr(self.context.module_answers.module), item),
{}, show_answer_metadata=self.context.show_answer_metadata)
# Mark it as safe.
from jinja2 import Markup
return Markup(content)
else:
raise ValueError("%s is not the id of an output document in %s." % (item, self.context.module_answers.module))
except Exception as e:
return str(e)
def __contains__(self, item):
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id") == item:
return True
return False
def __iter__(self):
for doc in self.context.module_answers.module.spec.get("output", []):
if doc.get("id"):
yield doc["id"]
class RenderedProject(TemplateContext):
def __init__(self, project, parent_context=None):
self.project = project
def _lazy_load():
if self.project.root_task:
return self.project.root_task.get_answers()
super().__init__(_lazy_load, parent_context.escapefunc, parent_context=parent_context)
self.source = self.source + ["project variable"]
def __str__(self):
return "<TemplateContext for %s - %s>" % (self.project, self.module_answers)
def as_raw_value(self):
if self.is_computing_title:
# When we're computing the title for "instance-name", prevent
# infinite recursion.
return self.project.root_task.module.spec['title']
return self.project.title
def __html__(self):
return self.escapefunc(None, None, None, None, self.as_raw_value())
class RenderedOrganization(TemplateContext):
def __init__(self, task, parent_context=None):
        self.task = task
def _lazy_load():
project = self.organization.get_organization_project()
if project.root_task:
return project.root_task.get_answers()
super().__init__(_lazy_load, parent_context.escapefunc, parent_context=parent_context)
self.source = self.source + ["organization variable"]
@property
def organization(self):
if not hasattr(self, "_org"):
self._org = self.task.project.organization
return self._org
def __str__(self):
return "<TemplateContext for %s - %s>" % (self.organization, self.module_answers)
def as_raw_value(self):
return self.organization.name
def __html__(self):
return self.escapefunc(None, None, None, None, self.as_raw_value())
class RenderedAnswer:
def __init__(self, task, question, is_answered, answerobj, answer, parent_context):
self.task = task
self.question = question
self.is_answered = is_answered
self.answerobj = answerobj
self.answer = answer
self.parent_context = parent_context
self.escapefunc = parent_context.escapefunc
self.question_type = self.question.spec["type"]
self.cached_tc = None
def __html__(self):
# This method name is a Jinja2 convention. See http://jinja.pocoo.org/docs/2.10/api/#jinja2.Markup.
# Jinja2 calls this method to get the string to put into the template when this value
# appears in template in a {{variable}} directive.
#
# So this method returns how the templates render a question's answer when used as in e.g. {{q0}}.
if self.answer is None:
# Render a non-answer answer.
if self.parent_context.is_computing_title:
# When computing an instance-name title,
# raise an exception (caught higher up) if
# an unanswered question is rendered.
raise ValueError("Attempt to render unanswered question {}.".format(self.question.key))
value = "<%s>" % self.question.spec['title']
elif self.question_type == "multiple-choice":
# Render multiple-choice as a comma+space-separated list of the choice keys.
value = ", ".join(self.answer)
elif self.question_type == "datagrid":
# Render datagrid as an array of dictionaries
value = str(self.answer)
elif self.question_type == "file":
# Pass something to the escapefunc that HTML rendering can
# recognize as a file but non-HTML rendering sees as a string.
class FileValueWrapper:
def __init__(self, answer):
self.file_data = answer
def __str__(self):
return "<uploaded file: " + self.file_data['url'] + ">"
value = FileValueWrapper(self.answer)
elif self.question_type in ("module", "module-set"):
ans = self.answer # ModuleAnswers or list of ModuleAnswers
if self.question_type == "module": ans = [ans] # make it a lsit
def get_title(task):
if self.parent_context.is_computing_title:
# When we're computing the title for "instance-name", prevent
# infinite recursion.
return task.module.spec['title']
else:
# Get the computed title.
return task.title
value = ", ".join(get_title(a.task) for a in ans)
else:
# For all other question types, just call Python str().
value = str(self.answer)
# And in all cases, escape the result.
return self.escapefunc(self.question, self.task, self.answer is not None, self.answerobj, value)
@property
def text(self):
# How the template renders {{q0.text}} to get a nice display form of the answer.
if self.answer is None:
if self.parent_context.is_computing_title:
# When computing an instance-name title,
# raise an exception (caught higher up) if
# an unanswered question is rendered.
raise ValueError("Attempt to render unanswered question {}.".format(self.question.key))
value = "<not answered>"
elif self.question_type == "date":
# Format the ISO date for display.
value = str(self.answer) # fall-back
import re, datetime
            m = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)$", self.answer)
if m:
try:
year, month, date = [int(x) for x in m.groups()]
value = datetime.date(year, month, date).strftime("%x")
except ValueError:
pass
elif self.question_type == "yesno":
value = ("Yes" if self.answer == "yes" else "No")
elif self.question_type == "choice":
value = get_question_choice(self.question, self.answer)["text"]
elif self.question_type == "multiple-choice":
if len(self.answer) == 0:
value = "<nothing chosen>"
else:
choices = [get_question_choice(self.question, c)["text"] for c in self.answer] # get choice text
delim = "," if ("," not in "".join(choices)) else ";" # separate choices by commas unless there are commas in the choices, then use semicolons
value = (delim+" ").join(choices)
elif self.question_type == "datagrid":
if len(self.answer) == 0:
value = "<nothing chosen>"
else:
value = str(self.answer)
elif self.question_type in ("integer", "real"):
# Use a locale to generate nice human-readable numbers.
# The locale is set on app startup using locale.setlocale in settings.py.
import locale
value = locale.format(
"%d" if self.question_type == "integer" else "%g",
self.answer,
grouping=True)
elif self.question_type == "file":
value = "<uploaded file: " + self.answer['url'] + ">"
elif self.question_type in ("module", "module-set"):
# This field is not present for module-type questions because
# the keys are attributes exposed by the answer.
raise AttributeError()
else:
# For all other question types, just call Python str().
value = str(self.answer)
# Wrap the value in something that provides a __html__
# method to override Jinja2 escaping so we can use our
# own function.
class SafeString:
def __init__(self, value, ra):
self.value = value
self.ra = ra
def __html__(self):
return self.ra.escapefunc(self.ra.question, self.ra.task, self.ra.answer is not None, self.ra.answerobj, self.value)
return SafeString(value, self)
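    # Sketch of the difference between {{ q }} and {{ q.text }} for a hypothetical
    # yesno question: {{ q }} goes through __html__ and renders the raw key "yes",
    # while {{ q.text }} renders the display form "Yes".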
@property
def edit_link(self):
# Return a link to edit this question.
return self.task.get_absolute_url_to_question(self.question)
@property
def choices_selected(self):
# Return the dicts for each choice that is a part of the answer.
if self.question_type == "multiple-choice":
return [
choice
for choice in self.question.spec["choices"]
if self.answer is not None and choice["key"] in self.answer
]
raise AttributeError
@property
def choices_not_selected(self):
# Return the dicts for each choice that is not a part of the answer.
if self.question_type == "multiple-choice":
return [
choice
for choice in self.question.spec["choices"]
if choice["key"] not in self.answer or self.answer is None
]
raise AttributeError
@property
def not_yet_answered(self):
return not self.is_answered
@property
def imputed(self):
# The answer was imputed if it's considered 'answered'
# but there is no TaskAnswerHistory record in the database
# for it, which means the user didn't provide the answer.
return self.is_answered and (self.answerobj is None)
@property
def skipped(self):
# The question has a null answer either because it was imputed null
# or the user skipped it.
return self.is_answered and (self.answer is None)
@property
def skipped_by_user(self):
# The question has a null answer but it wasn't imputed null.
return self.is_answered and (self.answerobj is not None) and (self.answer is None)
@property
def answered(self):
# This question has an answer, either because it was imputed or it was
# answered by the user, but not if it was imputed null or answered null
# because those are skipped states above.
return self.is_answered and (self.answer is not None)
@property
def skipped_reason(self):
if self.answerobj is None:
return self.answerobj
return self.answerobj.skipped_reason
@property
def unsure(self):
# If the question was answered by a user, return its unsure flag.
if not self.answerobj:
return None
return self.answerobj.unsure
@property
def date_answered(self):
# Date question was answered.
if not self.answerobj:
return None
return self.answerobj.created
@property
def reviewed_state(self):
# Question reviewed value.
if not self.answerobj:
return None
return self.answerobj.reviewed
def __bool__(self):
# How the template converts a question variable to
# a boolean within an expression (i.e. within an if).
# true.
if self.question_type == "yesno":
# yesno questions are true if they are answered as yes.
return self.answer == "yes"
else:
# Other question types are true if they are answered.
# (It would be bad to use Python bool() because it might
# give unexpected results for e.g. integer/real zero.)
return self.answer is not None
def __iter__(self):
if self.answer is None:
# If the question was skipped, return a generator that
# yields nothing --- there is nothing to iterate over.
return (None for _ in [])
if self.question_type == "multiple-choice":
# Iterate by creating a RenderedAnswer for each selected choice,
# with a made-up temporary Question instance that has the same
# properties as the actual multiple-choice choice but whose
# type is a single "choice".
from .models import ModuleQuestion
return (
RenderedAnswer(
self.task,
ModuleQuestion(
module=self.question.module,
key=self.question.key,
spec={
"type": "choice",
"title": self.question.spec['title'],
"prompt": self.question.spec['prompt'],
"choices": self.question.spec["choices"],
}),
self.is_answered,
self.answerobj,
ans, self.parent_context)
for ans in self.answer)
elif self.question_type == "datagrid":
# Iterate by creating a RenderedAnswer for each selected field,
# with a made-up temporary Question instance that has the same
# properties as the actual datagrid field but whose
# type is a single "datagrid".
from .models import ModuleQuestion
return (
RenderedAnswer(
self.task,
ModuleQuestion(
module=self.question.module,
key=self.question.key,
spec={
"type": "datagrid",
"title": self.question.spec['title'],
"prompt": self.question.spec['prompt'],
"fields": self.question.spec["fields"],
}),
self.is_answered,
self.answerobj,
ans, self.parent_context)
for ans in self.answer)
elif self.question_type == "module-set":
# Iterate over the sub-tasks' answers. Load each's answers + imputed answers.
return (TemplateContext(
v.with_extended_info(parent_context=self.parent_context if not v.task or not self.task or v.task.project_id==self.task.project_id else None),
self.escapefunc, parent_context=self.parent_context)
for v in self.answer)
raise TypeError("Answer of type %s is not iterable." % self.question_type)
def __len__(self):
if self.question_type in ("multiple-choice", "module-set"):
if self.answer is None: return 0
return len(self.answer)
if self.question_type in ("datagrid"):
if self.answer is None: return 0
return len(self.answer)
raise TypeError("Answer of type %s has no length." % self.question_type)
def __getattr__(self, item):
# For module-type questions, provide the answers of the
# sub-task as properties of this context variable.
if self.question_type == "module":
# Pass through via a temporary TemplateContext.
if self.answer is not None:
# If the question was not skipped, then we have the ModuleAnswers for it.
# Load its answers + evaluate impute conditions.
if not self.cached_tc:
self.cached_tc = TemplateContext(
lambda : self.answer.with_extended_info(parent_context=self.parent_context if not self.answer.task or not self.task or self.answer.task.project_id==self.task.project_id else None),
self.escapefunc,
parent_context=self.parent_context)
tc = self.cached_tc
else:
# The question was skipped -- i.e. we have no ModuleAnswers for
# the question that this RenderedAnswer represents. But we want
# to gracefully represent the inner item attribute as skipped too.
# If self.question.answer_type_module is set, then we know the
# inner Module type, so we can create a dummy instance that
# represents an unanswered instance of the Module.
if self.question.answer_type_module is not None:
ans = ModuleAnswers(self.question.answer_type_module, None, None)
else:
ans = None
tc = TemplateContext(ans, self.escapefunc, parent_context=self.parent_context)
return tc[item]
# For the "raw" question type, the answer value is any
# JSONable Python data structure. Forward the getattr
# request onto the value.
# Similarly for file questions which have their own structure.
elif self.question_type in ("raw", "file"):
if self.answer is not None:
return self.answer[item]
else:
# Avoid attribute errors.
return None
# For other types of questions, or items that are not question
# IDs of the subtask, just do normal Python behavior.
        raise AttributeError(item)
def __eq__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
return self.answer == other
def __gt__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
if self.answer is None or other is None:
# if either represents a skipped/imputed-null question,
# prevent a TypeError by just returning false
return False
try:
return self.answer > other
except TypeError:
# If one tries to compare a string to an integer, just
# say false.
return False
def __lt__(self, other):
if isinstance(other, RenderedAnswer):
other = other.answer
if self.answer is None or other is None:
# if either represents a skipped/imputed-null question,
# prevent a TypeError by just returning false
return False
try:
return self.answer < other
except TypeError:
# If one tries to compare a string to an integer, just
# say false.
return False
|
gpl-3.0
| 5,768,756,161,847,980,000
| 44.321768
| 204
| 0.57162
| false
| 4.561445
| false
| false
| false
|
ellmo/rogue-python-engine
|
rpe/player.py
|
1
|
1367
|
import rpe_map
import camera
class Player(object):
def __init__(self, rpe_map, direction_vector):
self._rpe_map = rpe_map
self._x = rpe_map.start_position[0]
self._y = rpe_map.start_position[1]
self._camera = camera.Camera(rpe_map.start_position, direction_vector)
@property
def rpe_map(self):
return self._rpe_map
@property
def camera(self):
return self._camera
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def position(self):
return (self._x, self._y)
@property
def dirx(self):
return self._camera.dirx
@property
def diry(self):
return self._camera.diry
def move(self, forward, left):
        if left == 0:
new_x = self._x + self.dirx * forward
new_y = self._y + self.diry * forward
else:
new_x = self._x + self.diry * left
new_y = self._y - self.dirx * left
_new_tile = self._rpe_map.tiles[int(new_y)][int(new_x)]
if not (_new_tile.solid or (_new_tile.thing and _new_tile.thing.blocking)):
self._x = new_x
self._y = new_y
self._camera.x = new_x
self._camera.y = new_y
def rotate(self, direction):
self._camera.rotate(direction)
|
gpl-3.0
| -1,962,575,099,425,045,200
| 23.854545
| 83
| 0.548647
| false
| 3.408978
| false
| false
| false
|
qedsoftware/commcare-hq
|
custom/ilsgateway/tanzania/handlers/generic_stock_report_handler.py
|
1
|
2041
|
from django.conf import settings
from corehq.apps.commtrack.exceptions import NotAUserClassError
from corehq.apps.commtrack.sms import process
from corehq.apps.sms.api import send_sms_to_verified_number
from custom.ilsgateway.tanzania.handlers.ils_stock_report_parser import ILSStockReportParser
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from dimagi.utils.decorators.memoized import memoized
class GenericStockReportHandler(KeywordHandler):
formatter = None
status_type = None
status_value = None
@property
@memoized
def data(self):
return ILSStockReportParser(
self.domain_object,
self.verified_contact,
self.formatter()
).parse(self.msg.text)
def get_message(self, data):
        raise NotImplementedError()
def on_success(self):
        raise NotImplementedError()
def on_error(self, data):
        raise NotImplementedError()
def handle(self):
location = self.user.location
domain = self.domain_object
location_id = self.location_id
if not location_id:
return False
if location.location_type_name == 'FACILITY':
try:
data = self.data
if not data:
return True
if not data.get('transactions'):
self.on_error(data)
return True
process(domain.name, data)
if not data['errors']:
self.on_success()
else:
self.on_error(data)
return True
self.respond(self.get_message(data))
except NotAUserClassError:
return True
            except Exception as e:  # todo: should we only trap SMSErrors?
if settings.UNIT_TESTING or settings.DEBUG:
raise
send_sms_to_verified_number(self.verified_contact, 'problem with stock report: %s' % str(e))
return True
|
bsd-3-clause
| 5,092,202,569,598,729,000
| 29.924242
| 108
| 0.593827
| false
| 4.427332
| false
| false
| false
|
qedsoftware/commcare-hq
|
corehq/apps/case_search/views.py
|
1
|
1603
|
import json
from corehq.apps.domain.decorators import cls_require_superuser_or_developer
from corehq.apps.domain.views import DomainViewMixin
from django.http import Http404
from dimagi.utils.web import json_response
from django.views.generic import TemplateView
from corehq.apps.case_search.models import case_search_enabled_for_domain
from corehq.util.view_utils import json_error, BadRequest
class CaseSearchView(DomainViewMixin, TemplateView):
template_name = 'case_search/case_search.html'
urlname = 'case_search'
@cls_require_superuser_or_developer
def get(self, request, *args, **kwargs):
if not case_search_enabled_for_domain(self.domain):
raise Http404("Domain does not have case search enabled")
return self.render_to_response(self.get_context_data())
@json_error
@cls_require_superuser_or_developer
def post(self, request, *args, **kwargs):
from corehq.apps.es.case_search import CaseSearchES
if not case_search_enabled_for_domain(self.domain):
raise BadRequest("Domain does not have case search enabled")
query = json.loads(request.POST.get('q'))
case_type = query.get('type')
search_params = query.get('parameters', [])
search = CaseSearchES()
search = search.domain(self.domain).is_closed(False)
if case_type:
search = search.case_type(case_type)
for param in search_params:
search = search.case_property_query(**param)
search_results = search.values()
return json_response({'values': search_results})
|
bsd-3-clause
| -1,550,747,963,873,273,300
| 39.075
| 76
| 0.696818
| false
| 3.825776
| false
| false
| false
|
googleapis/python-aiplatform
|
tests/unit/enhanced_library/test_enhanced_types.py
|
1
|
2007
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.aiplatform.v1.schema.trainingjob import definition
from google.cloud.aiplatform.v1beta1.schema.trainingjob import (
definition as definition_v1beta1,
)
ModelType = definition.AutoMlImageClassificationInputs().ModelType
test_training_input = definition.AutoMlImageClassificationInputs(
multi_label=True,
model_type=ModelType.CLOUD,
budget_milli_node_hours=8000,
disable_early_stopping=False,
)
ModelType_v1beta1 = definition_v1beta1.AutoMlImageClassificationInputs().ModelType
test_training_input_v1beta1 = definition_v1beta1.AutoMlImageClassificationInputs(
multi_label=True,
model_type=ModelType_v1beta1.CLOUD,
budget_milli_node_hours=8000,
disable_early_stopping=False,
)
# Test the v1 enhanced types.
def test_exposes_to_value_method_v1():
assert hasattr(test_training_input, "to_value")
def test_exposes_from_value_method_v1():
assert hasattr(test_training_input, "from_value")
def test_exposes_from_map_method_v1():
assert hasattr(test_training_input, "from_map")
# Test the v1beta1 enhanced types.
def test_exposes_to_value_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "to_value")
def test_exposes_from_value_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "from_value")
def test_exposes_from_map_method_v1beta1():
assert hasattr(test_training_input_v1beta1, "from_map")
|
apache-2.0
| 1,963,648,343,854,830,300
| 31.901639
| 82
| 0.765321
| false
| 3.333887
| true
| false
| false
|
praekelt/wsgi-ua-mapper
|
ua_mapper/updatewurfl.py
|
1
|
1900
|
import os
import sys
from optparse import OptionParser
from urllib import urlopen
from ua_mapper.wurfl2python import WurflPythonWriter, DeviceSerializer
OUTPUT_PATH = os.path.abspath(os.path.dirname(__file__))
WURFL_ARCHIVE_PATH = os.path.join(OUTPUT_PATH, "wurfl.zip")
WURFL_XML_PATH = os.path.join(OUTPUT_PATH, "wurfl.xml")
WURFL_PY_PATH = os.path.join(OUTPUT_PATH, "wurfl.py")
WURFL_DOWNLOAD_URL = 'http://downloads.sourceforge.net/project/wurfl/WURFL/latest/wurfl-latest.zip'
class Updater(object):
help = 'Updates Wurfl devices database.'
def write_archive(self, filename, data):
f = open(WURFL_ARCHIVE_PATH, "w")
f.write(data)
f.close()
def fetch_latest_wurfl(self):
print "Downloading Wurfl..."
data = urlopen(WURFL_DOWNLOAD_URL).read()
self.write_archive(WURFL_ARCHIVE_PATH, data)
os.system("unzip -o %s -d %s" % (WURFL_ARCHIVE_PATH, OUTPUT_PATH))
return True
def wurfl_to_python(self):
print "Compiling device list..."
# Setup options.
op = OptionParser()
op.add_option("-l", "--logfile", dest="logfile", default=sys.stderr,
help="where to write log messages")
# Cleanup args for converter to play nicely.
if '-f' in sys.argv:
sys.argv.remove('-f')
if '--force' in sys.argv:
sys.argv.remove('--force')
options, args = op.parse_args()
options = options.__dict__
options.update({"outfile": WURFL_PY_PATH})
# Perform conversion.
wurfl = WurflPythonWriter(WURFL_XML_PATH, device_handler=DeviceSerializer, options=options)
wurfl.process()
def handle(self, *args, **options):
self.fetch_latest_wurfl()
self.wurfl_to_python()
from ua_mapper.wurfl import devices
print "Done."
Updater().handle()
|
bsd-3-clause
| 2,836,465,306,440,326,000
| 31.758621
| 99
| 0.623684
| false
| 3.392857
| false
| false
| false
|
Clarity-89/clarityv2
|
src/clarityv2/work_entries/admin.py
|
1
|
1978
|
import datetime
from datetime import timedelta
from django.contrib import admin
from django.contrib.admin.filters import DateFieldListFilter
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from import_export.admin import ImportExportActionModelAdmin
from .models import WorkEntry
class CustomDateTimeFilter(DateFieldListFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
today = now.date()
last_year_begin = today.replace(year=today.year - 1, month=1, day=1)
last_year_end = today.replace(year=today.year, month=1, day=1)
self.links += ((
(_('Last year'), {
self.lookup_kwarg_since: str(last_year_begin),
self.lookup_kwarg_until: str(last_year_end),
}),
))
@admin.register(WorkEntry)
class WorkEntryAdmin(ImportExportActionModelAdmin):
list_display = ('date', 'duration', 'project', 'notes')
list_filter = ('project__client', 'project', ('date', CustomDateTimeFilter))
search_fields = ('notes',)
change_list_template = 'admin/work_entries/workentry/change_list.html'
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(request, extra_context=None)
if hasattr(response, 'context_data'):
cl = response.context_data.get('cl')
if cl:
queryset = cl.get_queryset(request)
duration = (queryset.aggregate(Sum('duration'))['duration__sum']) or timedelta()
response.context_data['total_duration'] = duration.total_seconds() / 3600
return response
|
mit
| 394,790,853,219,527,550
| 37.038462
| 96
| 0.650152
| false
| 4.078351
| false
| false
| false
|
amilan/dev-maxiv-pynutaq
|
src/pynutaq/perseus/perseusutils.py
|
1
|
7717
|
#!/usr/bin/env python
###############################################################################
# NutaqDiags device server.
#
# Copyright (C) 2013 Max IV Laboratory, Lund Sweden
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
###############################################################################
"""This module contains useful functions to be used in the devices.
"""
__author__ = 'antmil'
__docformat__ = 'restructuredtext'
import math
from pynutaq.perseus.perseusdefs import *
def get_offset(type, cavity):
if type == 'read':
if cavity == 'A':
return SETTINGS_READ_OFFSET_A
elif cavity == 'B':
return SETTINGS_READ_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
elif type == 'write':
if cavity == 'A':
return SETTINGS_WRITE_OFFSET_A
elif cavity == 'B':
return SETTINGS_WRITE_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
elif type == 'diag':
if cavity == 'A':
return DIAGNOSTICS_OFFSET_A
elif cavity == 'B':
return DIAGNOSTICS_OFFSET_B
else:
            raise ValueError('Unknown cavity. Must be A or B.')
else:
        raise ValueError('Wrong type of offset!')
def read_angle(perseus, address, cavity):
# =IF(P6>32767;(P6-65536)/32767*180;P6/32767*180)
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
if value > 32767:
angle = (value - 65536) * 180.0 / 32767
else:
angle = (value * 180.0) / 32767
return angle
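# Worked example (hypothetical register values): a raw reading of 16384 maps to
# 16384 * 180 / 32767 ~= +90.0 degrees, while 49152 maps to
# (49152 - 65536) * 180 / 32767 ~= -90.0 degrees.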
def write_angle(perseus, value, address, cavity):
"""=ROUND(IF(
E6<0; E6/180*32767+65536;
IF(E6<=180; E6/180*32767;
(E6-360)/180*32767+65536)
);0
)
"""
if value < 0:
angle = (value * 32767 / 180.0) + 65536
elif value <= 180.0:
angle = (value * 32767) / 180.0
else:
angle = ((value - 360) * 32767 / 180.0) + 65536
value = address << 17 | int(angle)
offset = get_offset('write', cavity)
perseus.write(offset, value)
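# Worked example (hypothetical input): an angle of -90.0 degrees becomes
# int(-90 * 32767 / 180.0 + 65536) == 49152, which is then OR-ed with the
# register address shifted left by 17 bits before being written.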
def read_milivolts(perseus, address, cavity):
"""
    This method converts the value read from a register to milivolts using the following formula:
VALUE = ROUND(P23*1000/32767*1,6467602581;0)
:param value: value read from a register.
:return: value converted in milivolts
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
milis = value * 1000.0 / 32767 * 1.6467602581
return milis
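# Worked example (hypothetical register value): a full-scale reading of 32767
# converts to 32767 * 1000 / 32767 * 1.6467602581 ~= 1646.76 mV.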
def write_milivolts(perseus, milivolts, address, cavity):
"""
    This method converts the value from milivolts to the raw value to be written in the register using the following
formula:
VALUE =ROUND(E23/1000*32767/1,6467602581;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (milivolts * 32767 / 1.6467602581) / 1000.0
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_settings_diag_milivolts(perseus, address, cavity):
"""
    This method converts the value read from a register to milivolts using the following formula:
    VALUE = ROUND(P23*1000/32767;0)
:param value: value read from a register.
:return: value converted in milivolts
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
milis = value * 1000.0 / 32767
return milis
def write_settings_diag_milivolts(perseus, milivolts, address, cavity):
"""
    This method converts the value from milivolts to the raw value to be written in the register using the following
    formula:
    VALUE = ROUND(E23/1000*32767;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (milivolts / 1000.0) * 32767
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_settings_diag_percentage(perseus, address, cavity):
"""
    This method converts the value read from a register to a percentage using the following formula:
    VALUE = ROUND(P23*100/32767;0)
    :param value: value read from a register.
    :return: value converted to a percentage
"""
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
percentage = value * 100.0 / 32767
return percentage
def write_settings_diag_percentage(perseus, percentage, address, cavity):
"""
    This method converts a percentage to the raw value to be written in the register using the following
    formula:
    VALUE = ROUND(E23/100*32767;0)
:param value: value to be converted.
:return: value to write in the register.
"""
value = (percentage / 100.0) * 32767
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_direct(perseus, address, cavity):
offset = get_offset('read', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
return value
def write_direct(perseus, value, address, cavity):
value = address << 17 | int(value)
offset = get_offset('write', cavity)
perseus.write(offset, value)
def read_diag_angle(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
# =IF(D49>32767;
# (D49-65536)/32767*180;
# D49/32767*180)
if value > 32767:
angle = (value - (1 << 16)) * 180.0 / 32767
else:
angle = value * 180.0 / 32767
return angle
def read_diag_direct(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
return value
def read_diag_milivolts(perseus, address, cavity):
offset = get_offset('diag', cavity)
perseus.write(offset, address)
value = perseus.read(offset)
#and now convert the value
#=IF(D9<32768;
# D9/32767*1000;
# (D9-2^16)/32767*1000)
if value < 32768:
milis = value * 1000.0 / 32767
else:
milis = ((value - (1 << 16)) * 1000.0) / 32767
return milis
def calc_amplitude(perseus, ivalue, qvalue):
amplitude = math.sqrt((ivalue**2) + (qvalue**2))
return amplitude
def calc_phase(perseus, ivalue, qvalue):
phase = math.atan2(qvalue, ivalue)
return phase
def start_reading_diagnostics(perseus, cavity):
offset = get_offset('diag', cavity)
value = 1 << 16
perseus.write(offset, value)
#@warning: I know ... this is not needed
value = 0 << 16
#lets continue
perseus.write(offset, value)
def end_reading_diagnostics(perseus, cavity):
offset = get_offset('diag', cavity)
value = 1 << 16
perseus.write(offset, value)
|
gpl-3.0
| -5,668,996,451,132,105,000
| 28.795367
| 110
| 0.618116
| false
| 3.361063
| false
| false
| false
|
nick41496/Beatnik
|
slackbot/migrations/0001_initial.py
|
1
|
1031
|
# Generated by Django 2.2.9 on 2020-04-26 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Install',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_id', models.TextField(unique=True, verbose_name='Install-specific app ID')),
('authed_user_id', models.TextField(verbose_name='Installing user ID')),
('scope', models.TextField(verbose_name='OAuth scopes granted')),
('access_token', models.TextField(verbose_name='OAuth access token')),
('bot_user_id', models.TextField(verbose_name='Install-specific bot ID')),
('team_name', models.TextField(verbose_name='Workspace name')),
('team_id', models.TextField(verbose_name='Workspace ID')),
],
),
]
|
gpl-3.0
| 4,604,570,838,081,418,000
| 37.185185
| 114
| 0.588749
| false
| 4.368644
| false
| false
| false
|
sachabest/cis599
|
web/dashboard/uploader.py
|
1
|
2445
|
import csv, logging
from .models import Student, Project
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
def parse_input_csv(csv_file_wrapper, project_file_wrapper):
'''
Parses the uploaded CSV files and returns the created projects together with a
mapping of project number to its students.
Expected format of project_file:
Name / Number / PM PennKey / Customer PennKey
Expected format of csv_file:
Name / Class / PennKey / Major / Team #
'''
new_projects = {}
new_students = []
data = csv.reader(project_file_wrapper.read().decode(encoding='UTF-8').splitlines())
for row in data:
project_number = int(row[1])
username = row[2] + "@upenn.edu"
customer_username = row[3] + "@upenn.edu"
try:
pm_user = User.objects.get(username=username)
except:
pm_user = User(username=username)
try:
customer_user = User.objects.get(username=customer_username)
except:
customer_user = User(username=customer_username)
pm_user.save()
customer_user.save()
try:
new_project = Project.objects.get(number=project_number)
except:
new_project = Project(name=row[0], number=project_number, pm_user=pm_user, \
client_user=customer_user)
new_project.save()
# set pm_user and customer_user later
new_projects[project_number] = new_project
data = csv.reader(csv_file_wrapper.read().decode(encoding='UTF-8').splitlines())
project_mapping = {}
for row in data:
username = row[2] + "@upenn.edu"
try:
student = User.objects.get(username=username)
except:
student = User(username=username)
student.first_name = "Not"
student.last_name = "Registered"
student.save()
student.student = Student()
student.student.year = row[1]
student.student.major = row[3]
student.student.save()
student.save()
# add code here to find if the PM user exists
project_number = int(row[4])
new_project = new_projects[project_number]
student.student.project = new_project
student.student.save()
if project_number not in project_mapping:
project_mapping[project_number] = []
project_mapping[project_number].append(student)
return (new_projects.values(), project_mapping)
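# A minimal usage sketch (hypothetical data; io.BytesIO stands in for the uploaded
# file wrappers, which only need a bytes-returning read(); a configured Django
# database is required because Users, Projects and Students are saved as a side effect):
#
# from io import BytesIO
# projects_csv = BytesIO(b"Demo Project,1,pmkey,clientkey\n")
# students_csv = BytesIO(b"Ada Lovelace,Senior,alovelace,CIS,1\n")
# projects, students_by_project = parse_input_csv(students_csv, projects_csv)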
|
mit
| -322,003,598,205,245,440
| 34.970588
| 88
| 0.608589
| false
| 3.918269
| false
| false
| false
|
ZTH1970/alcide
|
scripts/import_users.py
|
1
|
5758
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import csv
import codecs
import string
import random
from datetime import datetime, time
import django.core.management
import alcide.settings
django.core.management.setup_environ(alcide.settings)
from django.contrib.auth.models import User
from alcide.actes.models import EventAct
from alcide.agenda.models import Event, EventType
from alcide.dossiers.models import PatientRecord, Status, FileState
from alcide.ressources.models import Service
from alcide.personnes.models import Worker, Holiday, UserWorker
from alcide.ressources.models import WorkerType
wt="./scripts/worker_type.csv"
access_worker_enabled="./scripts/access_worker.csv"
worker_only_disabled="./scripts/worker_only.csv"
db_path = "./scripts/20121219-212026"
dbs = ["F_ST_ETIENNE_SESSAD_TED", "F_ST_ETIENNE_CMPP", "F_ST_ETIENNE_CAMSP", "F_ST_ETIENNE_SESSAD"]
def _to_date(str_date):
if not str_date:
return None
return datetime.strptime(str_date[:-13], "%Y-%m-%d")
def _to_int(str_int):
if not str_int:
return None
return int(str_int)
def discipline_mapper(tables_data, service):
for line in tables_data['discipline']:
# Insert workertype
if not WorkerType.objects.filter(name=line['libelle']):
WorkerType.objects.create(name=line['libelle'])
def intervenants_mapper(tables_data, service):
for line in tables_data['intervenants']:
# Insert workers
for disp in tables_data['discipline']:
if disp['id'] == line['discipline']:
type = WorkerType.objects.get(name=disp['libelle'])
# TODO: import the active ('actif') flag or not
worker, created = Worker.objects.get_or_create(
type=type,
last_name=line['nom'],
first_name=line['prenom'],
email=line['email'],
phone=line['tel'],
gender=int(line['titre']),
)
worker.services.add(service)
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="iso8859-15", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
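# Example of how UnicodeReader is used below (a sketch with a hypothetical file;
# the real CSVs use ';' separators and '|' quoting):
#
# with open('example.csv', 'rb') as f:
#     reader = UnicodeReader(f, delimiter=';', quotechar='|', encoding='utf-8')
#     reader.next()  # skip the header row
#     for row in reader:
#         print row[0], row[1]  # cells are already decoded to unicode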
def main():
'''User and worker'''
cmpp = Service.objects.get(name="CMPP")
camsp = Service.objects.get(name="CAMSP")
sessad_ted = Service.objects.get(name="SESSAD TED")
sessad_dys = Service.objects.get(name="SESSAD DYS")
csvfile = open(access_worker_enabled, 'rb')
csvlines = UnicodeReader(csvfile, delimiter=';', quotechar='|',encoding='utf-8')
csvlines.next()
for line in csvlines:
user = User(username=line[2])
user.set_password(line[3])
user.save()
last_name = line[0]
first_name = line[1]
gender = 1
if line[14] == 'femme':
gender = 2
email = line[13]
type = WorkerType.objects.get(pk=int(line[8]))
enabled = True
old_camsp_id = None
if line[9] != '':
old_camsp_id = line[9]
old_cmpp_id = None
if line[10] != '':
old_cmpp_id = line[10]
old_sessad_dys_id = None
if line[11] != '':
old_sessad_dys_id = line[11]
old_sessad_ted_id = None
if line[12] != '':
old_sessad_ted_id = line[12]
worker = Worker(last_name=last_name, first_name=first_name,
gender=gender, email=email, type=type,
old_camsp_id=old_camsp_id, old_cmpp_id=old_cmpp_id,
old_sessad_dys_id=old_sessad_dys_id, old_sessad_ted_id=old_sessad_ted_id,
enabled=enabled)
worker.save()
if line[4] != '':
worker.services.add(camsp)
if line[5] != '':
worker.services.add(cmpp)
if line[6] != '':
worker.services.add(sessad_dys)
if line[7] != '':
worker.services.add(sessad_ted)
worker.save()
UserWorker(user=user,worker=worker).save()
'''Worker only'''
csvfile = open(worker_only_disabled, 'rb')
csvlines = UnicodeReader(csvfile, delimiter=';', quotechar='|',encoding='utf-8')
csvlines.next()
for line in csvlines:
old_camsp_id = None
old_cmpp_id = None
old_sessad_dys_id = None
old_sessad_ted_id = None
service = line[5]
if service == 'CAMSP':
old_camsp_id = line[0]
elif service == 'CMPP':
old_cmpp_id = line[0]
elif service == 'SESSAD DYS':
old_sessad_dys_id = line[0]
else:
old_sessad_ted_id = line[0]
last_name = line[1]
first_name = line[2]
gender = 1
if line[3] == 'Femme':
gender = 2
type = WorkerType.objects.get(pk=int(line[4]))
enabled = False
worker = Worker(last_name=last_name, first_name=first_name,
gender=gender, email=None, type=type,
old_camsp_id=old_camsp_id, old_cmpp_id=old_cmpp_id,
old_sessad_dys_id=old_sessad_dys_id, old_sessad_ted_id=old_sessad_ted_id,
enabled=enabled)
worker.save()
if __name__ == "__main__":
main()
|
agpl-3.0
| -7,221,619,280,167,472,000
| 30.464481
| 99
| 0.588051
| false
| 3.307295
| false
| false
| false
|
sheqi/TVpgGLM
|
test/practice6_pystan_hmc_Qi_loop_test.py
|
1
|
1347
|
import pickle
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pyglm.utils.utils import expand_scalar, compute_optimal_rotation
dim = 2
N = 20
r = 1 + np.arange(N) // (N/2.)
th = np.linspace(0, 4 * np.pi, N, endpoint=False)
x = r * np.cos(th)
y = r * np.sin(th)
L = np.hstack((x[:, None], y[:, None]))
L1 = np.random.randn(N, dim)
W = np.zeros((N, N))
# Distance matrix
D = ((L[:, None, :] - L[None, :, :]) ** 2).sum(2)
sig = np.exp(-D/2)
Sig = np.tile(sig[:, :, None, None], (1, 1, 1, 1))
Mu = expand_scalar(0, (N, N, 1))
for n in range(N):
for m in range(N):
W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])
aa = 1.0
bb = 1.0
cc = 1.0
sm = pickle.load(open('/Users/pillowlab/Dropbox/pyglm-master/Practices/model.pkl', 'rb'))
new_data = dict(N=N, W=W, B=dim)
# Alternately sample the latent locations with Stan/HMC and update the kernel
# scale, Procrustes-aligning the inferred locations to the ground-truth layout.
for i in range(100):
fit = sm.sampling(data=new_data, iter=100, warmup=50, chains=1, init=[dict(l=L1, sigma=aa)],
control=dict(stepsize=0.001))
samples = fit.extract(permuted=True)
aa = np.mean(samples['sigma'])
#aa = samples['sigma'][-1]
#bb = np.mean(samples['eta'])
#cc = np.mean(samples['rho'])
L1 = np.mean(samples['l'], 0)
#L1 = samples['l'][-1]
R = compute_optimal_rotation(L1, L)
L1 = np.dot(L1, R)
plt.scatter(L1[:,0],L1[:,1])
plt.scatter(L[:,0],L[:,1])
|
mit
| 6,740,229,076,920,193,000
| 24.923077
| 96
| 0.582777
| false
| 2.440217
| false
| false
| false
|
googleads/googleads-python-lib
|
examples/ad_manager/v202105/activity_group_service/get_all_activity_groups.py
|
1
|
1854
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all activity groups.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
activity_group_service = client.GetService(
'ActivityGroupService', version='v202105')
# Create a statement to select activity groups.
statement = ad_manager.StatementBuilder(version='v202105')
# Retrieve a small amount of activity groups at a time, paging
# through until all activity groups have been retrieved.
while True:
response = activity_group_service.getActivityGroupsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for activity_group in response['results']:
# Print out some information for each activity group.
print('Activity group with ID "%d" and name "%s" was found.\n' %
(activity_group['id'], activity_group['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
apache-2.0
| 4,992,633,580,900,890,000
| 35.352941
| 74
| 0.721143
| false
| 4.19457
| false
| false
| false
|
vxsx/djangocms-text-ckeditor
|
djangocms_text_ckeditor/utils.py
|
1
|
6174
|
# -*- coding: utf-8 -*-
import os
import re
from collections import OrderedDict
from functools import wraps
from classytags.utils import flatten_context
from cms.models import CMSPlugin
from django.core.files.storage import get_storage_class
from django.template.defaultfilters import force_escape
from django.template.loader import render_to_string
from django.utils.decorators import available_attrs
from django.utils.functional import LazyObject
OBJ_ADMIN_RE_PATTERN = r'<cms-plugin .*?\bid="(?P<pk>\d+)".*?>.*?</cms-plugin>'
OBJ_ADMIN_WITH_CONTENT_RE_PATTERN = r'<cms-plugin .*?\bid="(?P<pk>\d+)".*?>(?P<content>.*?)</cms-plugin>'
OBJ_ADMIN_RE = re.compile(OBJ_ADMIN_RE_PATTERN, flags=re.DOTALL)
def _render_cms_plugin(plugin, context):
context = flatten_context(context)
context['plugin'] = plugin
# This, my fellow ckeditor enthusiasts, is a hack..
# If I let djangoCMS render the plugin using {% render_plugin %}
# it will wrap the output in the toolbar markup which we don't want.
# If I render the plugin without rendering a template first, then context processors
# are not called and so plugins that rely on these like those using sekizai will error out.
# The compromise is to render a template so that Django binds the context to it
# and thus calls context processors AND render the plugin manually with the context
# after it's been bound to a template.
response = render_to_string(
'cms/plugins/render_plugin_preview.html',
context,
request=context['request'],
)
return response
def random_comment_exempt(view_func):
# Borrowed from
# https://github.com/lpomfrey/django-debreach/blob/f778d77ffc417/debreach/decorators.py#L21
# This is a no-op if django-debreach is not installed
def wrapped_view(*args, **kwargs):
response = view_func(*args, **kwargs)
response._random_comment_exempt = True
return response
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def plugin_to_tag(obj, content='', admin=False):
plugin_attrs = OrderedDict(
id=obj.pk,
icon_alt=force_escape(obj.get_instance_icon_alt()),
content=content,
)
if admin:
# Include extra attributes when rendering on the admin
plugin_class = obj.get_plugin_class()
preview = getattr(plugin_class, 'text_editor_preview', True)
plugin_tag = (
u'<cms-plugin render-plugin=%(preview)s alt="%(icon_alt)s "'
u'title="%(icon_alt)s" id="%(id)d">%(content)s</cms-plugin>'
)
plugin_attrs['preview'] = 'true' if preview else 'false'
else:
plugin_tag = (
u'<cms-plugin alt="%(icon_alt)s "'
u'title="%(icon_alt)s" id="%(id)d">%(content)s</cms-plugin>'
)
return plugin_tag % plugin_attrs
def plugin_tags_to_id_list(text, regex=OBJ_ADMIN_RE):
def _find_plugins():
for tag in regex.finditer(text):
plugin_id = tag.groupdict().get('pk')
if plugin_id:
yield plugin_id
return [int(id) for id in _find_plugins()]
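# Illustrative example of the tag format these helpers operate on (the markup
# below is hypothetical but matches OBJ_ADMIN_RE_PATTERN):
#
# >>> sample = u'<cms-plugin alt="Link " title="Link" id="42">hello</cms-plugin>'
# >>> plugin_tags_to_id_list(sample)
# [42]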
def _plugin_tags_to_html(text, output_func):
"""
Convert plugin object 'tags' into html using the given output function.
output_func is called with the plugin instance and the regex match for each tag found in text.
"""
plugins_by_id = get_plugins_from_text(text)
def _render_tag(m):
try:
plugin_id = int(m.groupdict()['pk'])
obj = plugins_by_id[plugin_id]
except KeyError:
# Object must have been deleted. It cannot be rendered to
# end user so just remove it from the HTML altogether
return u''
else:
obj._render_meta.text_enabled = True
return output_func(obj, m)
return OBJ_ADMIN_RE.sub(_render_tag, text)
def plugin_tags_to_user_html(text, context):
def _render_plugin(obj, match):
return _render_cms_plugin(obj, context)
return _plugin_tags_to_html(text, output_func=_render_plugin)
def plugin_tags_to_admin_html(text, context):
def _render_plugin(obj, match):
plugin_content = _render_cms_plugin(obj, context)
return plugin_to_tag(obj, content=plugin_content, admin=True)
return _plugin_tags_to_html(text, output_func=_render_plugin)
def plugin_tags_to_db(text):
def _strip_plugin_content(obj, match):
return plugin_to_tag(obj)
return _plugin_tags_to_html(text, output_func=_strip_plugin_content)
def replace_plugin_tags(text, id_dict, regex=OBJ_ADMIN_RE):
plugins_by_id = CMSPlugin.objects.in_bulk(id_dict.values())
def _replace_tag(m):
try:
plugin_id = int(m.groupdict()['pk'])
new_id = id_dict[plugin_id]
plugin = plugins_by_id[new_id]
except KeyError:
# Object must have been deleted. It cannot be rendered to
# end user, or edited, so just remove it from the HTML
# altogether
return u''
return plugin_to_tag(plugin)
return regex.sub(_replace_tag, text)
def get_plugins_from_text(text, regex=OBJ_ADMIN_RE):
from cms.utils.plugins import downcast_plugins
plugin_ids = plugin_tags_to_id_list(text, regex)
plugins = CMSPlugin.objects.filter(pk__in=plugin_ids).select_related('placeholder')
plugin_list = downcast_plugins(plugins, select_placeholder=True)
return dict((plugin.pk, plugin) for plugin in plugin_list)
"""
The following class is taken from https://github.com/jezdez/django/compare/feature/staticfiles-templatetag
and should be removed and replaced by the django-core version in 1.4
"""
default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'
class ConfiguredStorage(LazyObject):
def _setup(self):
from django.conf import settings
self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))()
configured_storage = ConfiguredStorage()
def static_url(path):
'''
Helper that prefixes a URL with STATIC_URL and cms
'''
if not path:
return ''
return configured_storage.url(os.path.join('', path))
|
bsd-3-clause
| -6,806,602,773,429,382,000
| 33.49162
| 106
| 0.660026
| false
| 3.646781
| false
| false
| false
|
carpyncho/feets
|
feets/extractors/ext_q31.py
|
1
|
3876
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .core import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class Q31(Extractor):
r"""
**Q31** (:math:`Q_{3-1}`)
:math:`Q_{3-1}` is the difference between the third quartile, :math:`Q_3`,
and the first quartile, :math:`Q_1`, of a raw light curve.
:math:`Q_1` is a split between the lowest 25% and the highest 75% of data.
:math:`Q_3` is a split between the lowest 75% and the highest 25% of data.
.. code-block:: pycon
>>> fs = feets.FeatureSpace(only=['Q31'])
>>> features, values = fs.extract(**lc_normal)
>>> dict(zip(features, values))
{'Q31': 1.3320376563134508}
References
----------
.. [kim2014epoch] Kim, D. W., Protopapas, P., Bailer-Jones, C. A.,
Byun, Y. I., Chang, S. W., Marquette, J. B., & Shin, M. S. (2014).
The EPOCH Project: I. Periodic Variable Stars in the EROS-2 LMC
Database. arXiv preprint Doi:10.1051/0004-6361/201323252.
"""
data = ["magnitude"]
features = ["Q31"]
def fit(self, magnitude):
q31 = np.percentile(magnitude, 75) - np.percentile(magnitude, 25)
return {"Q31": q31}
class Q31Color(Extractor):
r"""
**Q31_color** (:math:`Q_{3-1|B-R}`)
:math:`Q_{3-1}` applied to the difference between both bands of a light
curve (B-R).
.. code-block:: pycon
>>> fs = feets.FeatureSpace(only=['Q31_color'])
>>> features, values = fs.extract(**lc_normal)
>>> dict(zip(features, values))
{'Q31_color': 1.8840489594535512}
References
----------
.. [kim2014epoch] Kim, D. W., Protopapas, P., Bailer-Jones, C. A.,
Byun, Y. I., Chang, S. W., Marquette, J. B., & Shin, M. S. (2014).
The EPOCH Project: I. Periodic Variable Stars in the EROS-2 LMC
Database. arXiv preprint Doi:10.1051/0004-6361/201323252.
"""
data = ["aligned_magnitude", "aligned_magnitude2"]
features = ["Q31_color"]
def fit(self, aligned_magnitude, aligned_magnitude2):
N = len(aligned_magnitude)
b_r = aligned_magnitude[:N] - aligned_magnitude2[:N]
q31_color = np.percentile(b_r, 75) - np.percentile(b_r, 25)
return {"Q31_color": q31_color}
|
mit
| -5,410,018,966,500,149,000
| 33.300885
| 79
| 0.566563
| false
| 3.774099
| false
| false
| false
|
xinghalo/DMInAction
|
src/tensorflow/recommend/ops.py
|
1
|
2022
|
import tensorflow as tf
def inference_svd(user_batch, item_batch, user_num, item_num, dim=5, device="/cpu:0"):
with tf.device("/cpu:0"):
bias_global = tf.get_variable("bias_global", shape=[])
w_bias_user = tf.get_variable("embd_bias_user", shape=[user_num])
w_bias_item = tf.get_variable("embd_bias_item", shape=[item_num])
# embedding_lookup looks up, in w_bias_user, the entries indexed by user_batch
bias_user = tf.nn.embedding_lookup(w_bias_user, user_batch, name="bias_user")
bias_item = tf.nn.embedding_lookup(w_bias_item, item_batch, name="bias_item")
w_user = tf.get_variable("embd_user", shape=[user_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
w_item = tf.get_variable("embd_item", shape=[item_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
embd_user = tf.nn.embedding_lookup(w_user, user_batch, name="embedding_user")
embd_item = tf.nn.embedding_lookup(w_item, item_batch, name="embedding_item")
with tf.device(device):
infer = tf.reduce_sum(tf.multiply(embd_user, embd_item), 1)
infer = tf.add(infer, bias_global)
infer = tf.add(infer, bias_user)
infer = tf.add(infer, bias_item, name="svd_inference")
regularizer = tf.add(tf.nn.l2_loss(embd_user), tf.nn.l2_loss(embd_item), name="svd_regularizer")
return infer, regularizer
def optimization(infer, regularizer, rate_batch, learning_rate=0.001, reg=0.1, device="/cpu:0"):
global_step = tf.train.get_global_step()
assert global_step is not None
with tf.device(device):
cost_l2 = tf.nn.l2_loss(tf.subtract(infer, rate_batch))
penalty = tf.constant(reg, dtype=tf.float32, shape=[], name="l2")
cost = tf.add(cost_l2, tf.multiply(regularizer, penalty))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step=global_step)
return cost, train_op
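# A minimal wiring sketch (assumptions: TensorFlow 1.x graph mode, placeholder
# user/item/rating batches and made-up vocabulary sizes):
#
# user_batch = tf.placeholder(tf.int32, shape=[None], name="user_id")
# item_batch = tf.placeholder(tf.int32, shape=[None], name="item_id")
# rate_batch = tf.placeholder(tf.float32, shape=[None], name="rating")
# tf.train.get_or_create_global_step()
# infer, regularizer = inference_svd(user_batch, item_batch, user_num=6040, item_num=3952, dim=5)
# cost, train_op = optimization(infer, regularizer, rate_batch, learning_rate=0.001, reg=0.1)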
|
apache-2.0
| -2,948,722,271,632,747,000
| 56.142857
| 104
| 0.643
| false
| 3.154574
| false
| false
| false
|
robin900/gspread-dataframe
|
tests/mock_worksheet.py
|
1
|
1986
|
import os.path
import json
import re
from gspread.models import Cell
from gspread_dataframe import _cellrepr
def contents_of_file(filename, et_parse=True):
with open(os.path.join(os.path.dirname(__file__), filename), "r") as f:
return json.load(f)
SHEET_CONTENTS_FORMULAS = contents_of_file("sheet_contents_formulas.json")
SHEET_CONTENTS_EVALUATED = contents_of_file("sheet_contents_evaluated.json")
CELL_LIST = [
Cell(row=i + 1, col=j + 1, value=value)
for i, row in enumerate(contents_of_file("cell_list.json"))
for j, value in enumerate(row)
]
CELL_LIST_STRINGIFIED = [
Cell(
row=i + 1,
col=j + 1,
value=_cellrepr(
value,
allow_formulas=True,
string_escaping=re.compile(r"3e50").match,
),
)
for i, row in enumerate(contents_of_file("cell_list.json"))
for j, value in enumerate(row)
]
_without_index = contents_of_file("cell_list.json")
for _r in _without_index:
del _r[0]
CELL_LIST_STRINGIFIED_NO_THINGY = [
Cell(
row=i + 1,
col=j + 1,
value=_cellrepr(
value,
allow_formulas=True,
string_escaping=re.compile(r"3e50").match,
),
)
for i, row in enumerate(_without_index)
for j, value in enumerate(row)
]
class MockWorksheet(object):
def __init__(self):
self.row_count = 10
self.col_count = 10
self.id = "fooby"
self.title = "gspread dataframe test"
self.spreadsheet = MockSpreadsheet()
class MockSpreadsheet(object):
def values_get(self, *args, **kwargs):
if (
kwargs.get("params", {}).get("valueRenderOption")
== "UNFORMATTED_VALUE"
):
return SHEET_CONTENTS_EVALUATED
if kwargs.get("params", {}).get("valueRenderOption") == "FORMULA":
return SHEET_CONTENTS_FORMULAS
if __name__ == "__main__":
from gspread_dataframe import *
ws = MockWorksheet()
|
mit
| -4,786,977,959,139,163,000
| 24.792208
| 76
| 0.598691
| false
| 3.315526
| false
| false
| false
|
theonaun/theo_site
|
app_surgeo/services/rest_api.py
|
1
|
2418
|
import json
from django.http import HttpResponse
from .calculations import surgeo_model
from .calculations import surname_model
from .calculations import geocode_model
from .calculations import forename_model
from .hmac_utility import verify_message
class RestAPI(object):
'''Takes queries and gets results.'''
@classmethod
def input_query(cls, request):
query_dict = request.GET
# Come back to this and do HMAC
function_dict = {'forename_query': cls.forename_query,
'surgeo_query': cls.surgeo_query,
'geocode_query': cls.geocode_query,
'surname_query': cls.surname_query}
try:
''' TODO DEBUG ONLY... LEAVING HMAC verification off.
query_string = query_dict.urlencode()
truncated_query_string = query_string.partition('&hmac=')[0]
hmac_string = query_dict['hmac']
message_verified = verify_message(request.user,
truncated_query_string,
hmac_string)
if not message_verified:
return HttpResponse('Unauthorized', status=401)
'''
function_string = query_dict['function']
function = function_dict[function_string]
result = function(query_dict)
return result
except Exception:
return False
@classmethod
def forename_query(cls, argument_dict):
forename = argument_dict['forename_input']
result = forename_model(forename)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def surgeo_query(cls, argument_dict):
surname = argument_dict['surname_input']
zcta = argument_dict['zcta_input']
result = surgeo_model(surname, zcta)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def geocode_query(cls, argument_dict):
zcta = argument_dict['zcta_input']
result = geocode_model(zcta)
json_string = json.dumps(dict(result))
return json_string
@classmethod
def surname_query(cls, argument_dict):
surname = argument_dict['surname_input']
result = surname_model(surname)
json_string = json.dumps(dict(result))
return json_string
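# Illustrative query-string contract expected by RestAPI.input_query (values are
# hypothetical; HMAC verification is currently commented out above, so only the
# 'function' selector and the model inputs are required):
#
# GET /api/?function=surgeo_query&surname_input=GARCIA&zcta_input=63144
# GET /api/?function=surname_query&surname_input=GARCIA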
|
mit
| -262,319,776,951,719,100
| 33.056338
| 72
| 0.593879
| false
| 4.317857
| false
| false
| false
|
kodi-czsk/plugin.video.online-files
|
resources/lib/fastshare.py
|
1
|
5924
|
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2012 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,urllib,urllib2,cookielib,random,util,sys,os,traceback
from provider import ContentProvider
from provider import ResolveException
class FastshareContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None,tmp_dir='.'):
ContentProvider.__init__(self,'fastshare.cz','http://www.fastshare.cz/',username,password,filter,tmp_dir)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['search','resolve']
def search(self,keyword):
return self.list('?term='+urllib.quote(keyword))
def list(self,url):
result = []
page = util.request(self._url(url))
data = util.substr(page,'<div class=\"search','<footer')
for m in re.finditer('<div class=\"search-result-box(.+?)</a>',data,re.IGNORECASE | re.DOTALL ):
it = m.group(1)
link = re.search('<a href=([^ ]+)',it,re.IGNORECASE | re.DOTALL)
name = re.search('title=\"([^\"]+)',it,re.IGNORECASE | re.DOTALL)
img = re.search('<img src=\"([^\"]+)',it,re.IGNORECASE | re.DOTALL)
size = re.search('<div class=\"fs\">([^<]+)',it,re.IGNORECASE | re.DOTALL)
time = re.search('<div class=\"vd\">([^<]+)',it,re.IGNORECASE | re.DOTALL)
if name and link:
item = self.video_item()
item['title'] = name.group(1)
if size:
item['size'] = size.group(1).strip()
if time:
item['length'] = time.group(1).strip()
item['url'] = self._url(link.group(1))
item['img'] = self._url(img.group(1))
self._filter(result,item)
next = re.search('<a href=\"(?P<url>[^\"]+)[^>]+>dal',data,re.IGNORECASE | re.DOTALL)
if next:
item = self.dir_item()
item['type'] = 'next'
item['url'] = next.group('url')
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
util.init_urllib()
url = self._url(item['url'])
page = ''
try:
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.UnknownHandler())
urllib2.install_opener(opener)
request = urllib2.Request(url)
request.add_header('User-Agent',util.UA)
response= urllib2.urlopen(request)
page = response.read()
response.close()
except urllib2.HTTPError, e:
traceback.print_exc()
return
data = util.substr(page,'<form method=post target=\"iframe_dwn\"','</form>')
action = re.search('action=(?P<url>[^>]+)',data,re.IGNORECASE | re.DOTALL)
img = re.search('<img src=\"(?P<url>[^\"]+)',data,re.IGNORECASE | re.DOTALL)
if img and action:
sessid=[]
for cookie in re.finditer('(PHPSESSID=[^\;]+)',response.headers.get('Set-Cookie'),re.IGNORECASE | re.DOTALL):
sessid.append(cookie.group(1))
# we have to download image ourselves
image = util.request(self._url(img.group('url')),headers={'Referer':url,'Cookie':sessid[-1]})
img_file = os.path.join(self.tmp_dir,'captcha.png')
util.save_data_to_file(image,img_file)
code = None
if captcha_cb:
code = captcha_cb({'id':'0','img':img_file})
if not code:
self.info('No captcha received, exit')
return
request = urllib.urlencode({'code':code})
req = urllib2.Request(self._url(action.group('url')),request)
req.add_header('User-Agent',util.UA)
req.add_header('Referer',url)
req.add_header('Cookie',sessid[-1])
try:
resp = urllib2.urlopen(req)
if resp.code == 302:
file_url = resp.headers.get('location')
else:
file_url = resp.geturl()
if file_url.find(action.group('url')) > 0:
msg = resp.read()
resp.close()
js_msg = re.search('alert\(\'(?P<msg>[^\']+)',msg,re.IGNORECASE | re.DOTALL)
if js_msg:
raise ResolveException(js_msg.group('msg'))
self.error(msg)
raise ResolveException('Nelze ziskat soubor, zkuste to znovu')  # "Unable to get the file, try again"
resp.close()
if file_url.find('data') >=0 or file_url.find('download_free') > 0:
item['url'] = file_url
return item
self.error('wrong captcha, retrying')
return self.resolve(item,captcha_cb,select_cb)
except urllib2.HTTPError:
traceback.print_exc()
return
|
gpl-2.0
| 3,914,445,946,651,833,000
| 43.878788
| 121
| 0.549629
| false
| 3.866841
| false
| false
| false
|
ngageoint/scale
|
scale/recipe/seed/recipe_definition.py
|
1
|
21647
|
"""Defines the class for managing a recipe definition"""
from __future__ import unicode_literals
import json
import os
from django.db.models import Q
from job.configuration.data.exceptions import InvalidConnection
from job.configuration.interface.scale_file import ScaleFileDescription
from job.deprecation import JobConnectionSunset
from job.handlers.inputs.file import FileInput
from job.handlers.inputs.files import FilesInput
from job.handlers.inputs.property import PropertyInput
from job.models import JobType
from job.seed.types import SeedInputFiles, SeedInputJson
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from recipe.configuration.data.exceptions import InvalidRecipeConnection
from recipe.configuration.definition.exceptions import InvalidDefinition
from recipe.handlers.graph import RecipeGraph
DEFAULT_VERSION = '2.0'
SCHEMA_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema/recipe_definition_2_0.json')
with open(SCHEMA_FILENAME) as schema_file:
RECIPE_DEFINITION_SCHEMA = json.load(schema_file)
class RecipeDefinition(object):
"""Represents the definition for a recipe. The definition includes the recipe inputs, the jobs that make up the
recipe, and how the inputs and outputs of those jobs are connected together.
"""
def __init__(self, definition):
"""Creates a recipe definition object from the given dictionary. The general format is checked for correctness,
but the actual job details are not checked for correctness.
:param definition: The recipe definition
:type definition: dict
:raises InvalidDefinition: If the given definition is invalid
"""
self._definition = definition
self._input_files_by_name = {} # Name -> `job.seed.types.SeedInputFiles`
self._input_json_by_name = {} # Name -> `job.seed.types.SeedInputJson`
self._jobs_by_name = {} # Name -> job dict
self._property_validation_dict = {} # Property Input name -> required
self._input_file_validation_dict = {} # File Input name -> (required, multiple, file description)
try:
validate(definition, RECIPE_DEFINITION_SCHEMA)
except ValidationError as ex:
raise InvalidDefinition('Invalid recipe definition: %s' % unicode(ex))
self._populate_default_values()
if not self._definition['version'] == DEFAULT_VERSION:
raise InvalidDefinition('%s is an unsupported version number' % self._definition['version'])
for input_file in self._get_input_files():
name = input_file['name']
if name in self._input_files_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate input data name' % name)
self._input_files_by_name[name] = SeedInputFiles(input_file)
for input_json in self._get_input_json():
name = input_json['name']
if name in self._input_json_by_name or name in self._input_files_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate input data name' % name)
self._input_json_by_name[name] = SeedInputJson(input_json)
for job_dict in self._definition['jobs']:
name = job_dict['name']
if name in self._jobs_by_name:
raise InvalidDefinition('Invalid recipe definition: %s is a duplicate job name' % name)
self._jobs_by_name[name] = job_dict
self._create_validation_dicts()
self._validate_job_dependencies()
self._validate_no_dup_job_inputs()
self._validate_recipe_inputs()
def _get_inputs(self):
return self._definition.get('inputs', {})
def _get_input_files(self):
return self._get_inputs().get('files', {})
def _get_seed_input_files(self):
"""
:return: typed instance of Input Files
:rtype: [:class:`job.seed.types.SeedInputFiles`]
"""
return [SeedInputFiles(x) for x in self._get_input_files()]
def _get_input_json(self):
return self._get_inputs().get('json', {})
def _get_seed_input_json(self):
"""
:return: typed instance of Input JSON
:rtype: [:class:`job.seed.types.SeedInputJson`]
"""
return [SeedInputJson(x) for x in self._get_input_json()]
def get_dict(self):
"""Returns the internal dictionary that represents this recipe definition
:returns: The internal dictionary
:rtype: dict
"""
return self._definition
def get_graph(self):
"""Returns the recipe graph for this definition
:returns: The recipe graph
:rtype: :class:`recipe.handlers.graph.RecipeGraph`
"""
graph = RecipeGraph()
for input_file in self._get_seed_input_files():
if input_file.multiple:
graph_input = FilesInput(input_file.name, input_file.required)
else:
graph_input = FileInput(input_file.name, input_file.required)
graph.add_input(graph_input)
for input_json in self._get_seed_input_json():
graph.add_input(PropertyInput(input_json.name, input_json.required))
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
job_type = job_dict['job_type']
job_type_name = job_type['name']
job_type_version = job_type['version']
graph.add_job(job_name, job_type_name, job_type_version)
for recipe_input_dict in job_dict['recipe_inputs']:
recipe_input_name = recipe_input_dict['recipe_input']
job_input_name = recipe_input_dict['job_input']
graph.add_recipe_input_connection(recipe_input_name, job_name, job_input_name)
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
dependency_connections = []
for conn_dict in dependency_dict['connections']:
conn_input = conn_dict['input']
job_output = conn_dict['output']
dependency_connections.append((job_output, conn_input))
graph.add_dependency(dependency_name, job_name, dependency_connections)
return graph
def get_job_types(self, lock=False):
"""Returns a set of job types for each job in the recipe
:param lock: Whether to obtain select_for_update() locks on the job type models
:type lock: bool
:returns: Set of referenced job types
:rtype: set[:class:`job.models.JobType`]
"""
filters = []
for job_type_key in self.get_job_type_keys():
job_type_filter = Q(name=job_type_key[0], version=job_type_key[1])
filters = filters | job_type_filter if filters else job_type_filter
if filters:
job_type_query = JobType.objects.all()
if lock:
job_type_query = job_type_query.select_for_update().order_by('id')
return {job_type for job_type in job_type_query.filter(filters)}
return set()
def get_job_type_keys(self):
"""Returns a set of tuples that represent keys for each job in the recipe
:returns: Set of referenced job types as a tuple of (name, version)
:rtype: set[(str, str)]
"""
job_type_keys = set()
for job_dict in self._jobs_by_name.itervalues():
if 'job_type' in job_dict:
job_type = job_dict['job_type']
if 'name' in job_type and 'version' in job_type:
job_type_keys.add((job_type['name'], job_type['version']))
return job_type_keys
def get_job_type_map(self):
"""Returns a mapping of job name to job type for each job in the recipe
:returns: Dictionary with the recipe job name of each job mapping to its job type
:rtype: dict of str -> :class:`job.models.JobType`
"""
results = {}
job_types = self.get_job_types()
job_type_map = {(job_type.name, job_type.version): job_type for job_type in job_types}
for job_name, job_dict in self._jobs_by_name.iteritems():
if 'job_type' in job_dict:
job_type = job_dict['job_type']
if 'name' in job_type and 'version' in job_type:
job_type_key = (job_type['name'], job_type['version'])
if job_type_key in job_type_map:
results[job_name] = job_type_map[job_type_key]
return results
def get_jobs_to_create(self):
"""Returns the list of job names and types to create for the recipe, in the order that they should be created
:returns: List of tuples with each job's name and type
:rtype: [(str, :class:`job.models.JobType`)]
"""
results = []
job_type_map = self.get_job_type_map()
ordering = self.get_graph().get_topological_order()
for job_name in ordering:
job_tuple = (job_name, job_type_map[job_name])
results.append(job_tuple)
return results
def validate_connection(self, recipe_conn):
"""Validates the given recipe connection to ensure that the connection will provide sufficient data to run a
recipe with this definition
:param recipe_conn: The recipe definition
:type recipe_conn: :class:`recipe.configuration.data.recipe_connection.LegacyRecipeConnection`
:returns: A list of warnings discovered during validation
:rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
:raises :class:`recipe.configuration.data.exceptions.InvalidRecipeConnection`: If there is a configuration
problem
"""
warnings = []
warnings.extend(recipe_conn.validate_input_files(self._input_file_validation_dict))
warnings.extend(recipe_conn.validate_properties(self._property_validation_dict))
# Check all recipe jobs for any file outputs
file_outputs = False
for job_type in self.get_job_types():
if job_type.get_job_interface().get_file_output_names():
file_outputs = True
break
# Make sure connection has a workspace if the recipe has any output files
if file_outputs and not recipe_conn.has_workspace():
raise InvalidRecipeConnection('No workspace provided for output files')
return warnings
def validate_data(self, recipe_data):
"""Validates the given data against the recipe definition
:param recipe_data: The recipe data
:type recipe_data: :class:`recipe.seed.recipe_data.RecipeData`
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`recipe.configuration.data.recipe_data.ValidationWarning`]
:raises :class:`recipe.configuration.data.exceptions.InvalidRecipeData`: If there is a configuration problem
"""
warnings = []
warnings.extend(recipe_data.validate_input_files(self._input_file_validation_dict))
warnings.extend(recipe_data.validate_input_json(self._property_validation_dict))
# Check all recipe jobs for any file outputs
file_outputs = False
for job_type in self.get_job_types():
if job_type.get_job_interface().get_file_output_names():
file_outputs = True
break
# If there is at least one file output, we must have a workspace to store the output(s)
if file_outputs:
warnings.extend(recipe_data.validate_workspace())
return warnings
def validate_job_interfaces(self):
"""Validates the interfaces of the recipe jobs in the definition to ensure that all of the input and output
connections are valid
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
:raises :class:`recipe.configuration.definition.exceptions.InvalidDefinition`:
If there are any invalid job connections in the definition
"""
# Query for job types
job_types_by_name = self.get_job_type_map() # Job name in recipe -> job type model
for job_name, job_data in self._jobs_by_name.iteritems():
if job_name not in job_types_by_name:
if 'job_type' in job_data:
job_type = job_data['job_type']
if 'name' in job_type and 'version' in job_type:
raise InvalidDefinition('Unknown job type: (%s, %s)' % (job_type['name'], job_type['version']))
else:
raise InvalidDefinition('Missing job type name or version: %s' % job_name)
else:
raise InvalidDefinition('Missing job type declaration: %s' % job_name)
warnings = []
for job_name in self._jobs_by_name:
job_dict = self._jobs_by_name[job_name]
warnings.extend(self._validate_job_interface(job_dict, job_types_by_name))
return warnings
def _add_recipe_inputs_to_conn(self, job_conn, recipe_inputs):
"""Populates the given connection for a job with its recipe inputs
:param job_conn: The job's connection
:type job_conn: :class:`job.configuration.data.job_connection.JobConnection` or
:class:`job.data.job_connection.SeedJobConnection`
:param recipe_inputs: List of recipe inputs used for the job
:type recipe_inputs: list of dict
"""
for recipe_dict in recipe_inputs:
recipe_input = recipe_dict['recipe_input']
job_input = recipe_dict['job_input']
if recipe_input in self._input_json_by_name:
job_conn.add_property(job_input)
elif recipe_input in self._input_files_by_name:
input_file = self._input_files_by_name[recipe_input]
job_conn.add_input_file(job_input, input_file.multiple, input_file.media_types, not input_file.required,
input_file.partial)
def _create_validation_dicts(self):
"""Creates the validation dicts required by recipe_data to perform its validation"""
for input in self._get_seed_input_json():
self._property_validation_dict[input.name] = input.required
for input in self._get_seed_input_files():
file_desc = ScaleFileDescription()
for media_type in input.media_types:
file_desc.add_allowed_media_type(media_type)
self._input_file_validation_dict[input.name] = (input.required,
True if input.multiple else False,
file_desc)
def _populate_default_values(self):
"""Goes through the definition and populates any missing values with defaults
"""
for input_file in self._get_input_files():
if 'required' not in input_file:
input_file['required'] = True
if 'multiple' not in input_file:
input_file['multiple'] = False
if 'partial' not in input_file:
input_file['partial'] = False
if 'mediaTypes' not in input_file:
input_file['mediaTypes'] = []
for input_json in self._get_input_json():
if 'required' not in input_json:
input_json['required'] = True
for job_dict in self._definition['jobs']:
if not 'recipe_inputs' in job_dict:
job_dict['recipe_inputs'] = []
if not 'dependencies' in job_dict:
job_dict['dependencies'] = []
for dependency_dict in job_dict['dependencies']:
if not 'connections' in dependency_dict:
dependency_dict['connections'] = []
def _validate_job_interface(self, job_dict, job_types_by_name):
"""Validates the input connections for the given job in the recipe definition
:param job_dict: The job dictionary
:type job_dict: dict
:param job_types_by_name: Dict mapping all job names in the recipe to their job type models
:type job_types_by_name: dict
:returns: A list of warnings discovered during validation.
:rtype: list[:class:`job.configuration.data.job_data.ValidationWarning`]
:raises :class:`recipe.configuration.definition.exceptions.InvalidDefinition`:
If there are any invalid job connections in the definition
"""
job_type = job_types_by_name[job_dict['name']]
# Job connection will represent data to be passed to the job to validate
job_conn = JobConnectionSunset.create(job_type.get_job_interface())
# Assume a workspace is provided, this will be verified when validating the recipe data
job_conn.add_workspace()
# Populate connection with data that will come from recipe inputs
self._add_recipe_inputs_to_conn(job_conn, job_dict['recipe_inputs'])
# Populate connection with data that will come from job dependencies
warnings = []
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
job_type = job_types_by_name[dependency_name]
for conn_dict in dependency_dict['connections']:
conn_input = conn_dict['input']
job_output = conn_dict['output']
job_type.get_job_interface().add_output_to_connection(job_output, job_conn, conn_input)
job_type = job_types_by_name[job_dict['name']]
try:
warnings.extend(job_type.get_job_interface().validate_connection(job_conn))
except InvalidConnection as ex:
raise InvalidDefinition(unicode(ex))
return warnings
def _validate_job_dependencies(self):
"""Validates that every job dependency is listed in jobs and that there are no cyclic dependencies
:raises InvalidDefinition: If there is an undefined job or a cyclic dependency
"""
# Make sure all dependencies are defined
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
if dependency_name not in self._jobs_by_name:
msg = 'Invalid recipe definition: Job %s has undefined dependency %s' % (job_name, dependency_name)
raise InvalidDefinition(msg)
# Ensure no cyclic dependencies
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
dependencies_to_check = set()
dependencies_to_check.add(job_name)
while dependencies_to_check:
next_layer = set()
for dependency in dependencies_to_check:
job_dict = self._jobs_by_name[dependency]
for dependency_dict in job_dict['dependencies']:
dependency_name = dependency_dict['name']
if dependency_name == job_name:
msg = 'Invalid recipe definition: Job %s has a circular dependency' % job_name
raise InvalidDefinition(msg)
next_layer.add(dependency_name)
dependencies_to_check = next_layer
def _validate_no_dup_job_inputs(self):
"""Validates that there are no duplicate inputs for any job
:raises InvalidDefinition: If there is a duplicate input
"""
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
input_names = set()
for recipe_dict in job_dict['recipe_inputs']:
name = recipe_dict['job_input']
if name in input_names:
msg = 'Invalid recipe definition: Job %s has duplicate input %s' % (job_name, name)
raise InvalidDefinition(msg)
input_names.add(name)
for dependency_dict in job_dict['dependencies']:
for conn_dict in dependency_dict['connections']:
name = conn_dict['input']
if name in input_names:
msg = 'Invalid recipe definition: Job %s has duplicate input %s' % (job_name, name)
raise InvalidDefinition(msg)
input_names.add(name)
def _validate_recipe_inputs(self):
"""Validates that the recipe inputs used when listing the jobs are defined in the input data section
:raises InvalidDefinition: If there is an undefined recipe input
"""
for job_dict in self._definition['jobs']:
job_name = job_dict['name']
for recipe_dict in job_dict['recipe_inputs']:
recipe_input = recipe_dict['recipe_input']
if recipe_input not in self._input_files_by_name and recipe_input not in self._input_json_by_name:
msg = 'Invalid recipe definition: Job %s has undefined recipe input %s' % (job_name, recipe_input)
raise InvalidDefinition(msg)
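# An illustrative (not exhaustive) definition dict accepted by RecipeDefinition;
# the field names are inferred from the validation code above and the actual JSON
# schema may impose additional constraints:
#
# EXAMPLE_DEFINITION = {
#     'version': '2.0',
#     'inputs': {
#         'files': [{'name': 'input_file', 'required': True, 'multiple': False,
#                    'mediaTypes': ['text/plain']}],
#         'json': [{'name': 'threshold', 'required': False}],
#     },
#     'jobs': [
#         {'name': 'job-a',
#          'job_type': {'name': 'my-job-a', 'version': '1.0.0'},
#          'recipe_inputs': [{'recipe_input': 'input_file', 'job_input': 'in_file'}]},
#         {'name': 'job-b',
#          'job_type': {'name': 'my-job-b', 'version': '1.0.0'},
#          'dependencies': [{'name': 'job-a',
#                            'connections': [{'output': 'out_file', 'input': 'in_file'}]}]},
#     ],
# }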
|
apache-2.0
| -336,845,178,525,268,600
| 43.541152
| 120
| 0.612464
| false
| 4.280601
| true
| false
| false
|
dnlcrl/TensorFlow-Playground
|
1.tutorials/2.Deep MNIST for Experts/mnist.py
|
1
|
3226
|
import tensorflow as tf
# download and install the data automatically
import input_data
# download dataset or open
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# launch interactive session
sess = tf.InteractiveSession()
# placeholders
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
# variables
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# initialize variables
sess.run(tf.initialize_all_variables())
# implement regression model
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# train the model
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
for i in range(1000):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# evaluate the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
# weight and bias initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# convolution and pooling
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# first conv layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# second layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
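# Shape bookkeeping for the layers above: with SAME padding each 2x2 max-pool
# halves the spatial size, so the 28x28 input becomes 14x14 after h_pool1 and
# 7x7 after h_pool2 -- which is why the densely connected layer below flattens
# the activations to 7 * 7 * 64 features.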
# densely connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# train and evaluate
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(
feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print "step %d, training accuracy %g" % (i, train_accuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print "test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
|
mit
| 4,093,565,697,744,812,000
| 28.87037
| 113
| 0.676689
| false
| 2.607922
| false
| false
| false
|
p473lr/i-urge-mafia-gear
|
HP Code Wars Documents/2014/Solutions/prob02_CheckDigit.py
|
1
|
2339
|
#!/usr/bin/env python
#CodeWars 2014
#
#Check Digits
#
# There are many situations where we exchange a number with someone. In some cases we need
# to be sure that the number we gave them was received correctly. This is especially
# important for credit cards, serial numbers, and product bar code numbers.
# A check digit is used to ensure that a sequence of numbers was transmitted or
# entered correctly without human error. This extra digit helps verify that a tired
# programmer didn't switch numbers (ex. 12 -> 15), reverse a pair of numbers
# (ex. 34 -> 43) or otherwise alter the sequence. The different algorithms used
# to calculate a check digit determine what types of errors it will catch.
#
# For UPC there's a specific algorithm that's used to catch 100% of single digit errors
# and 89% of transposition errors. Your task is to calculate the missing check digit for
# the given list of UPCs.
#
# First, add all the digits in the odd-numbered positions together and multiply the
# result by three. Then add the digits in the even-numbered positions to the result.
# Next, find the modulo 10 of the sum. The modulo operation calculates the remainder
# after dividing the sum by 10. Finally, subtract it from 10 (taking the result modulo 10) to obtain the check digit.
#
# The first line of the input will contain the number of partial UPCs that follow.
# Each UPC will be on its own line with spaces between all the digits.
#
# 7
# 0 3 6 0 0 0 2 9 1 4 5
# 0 7 3 8 5 2 0 0 9 3 8
# 0 4 1 2 2 0 1 8 9 0 4
# 0 3 7 0 0 0 2 0 2 1 4
# 7 6 5 6 6 8 2 0 2 0 2
# 0 4 1 2 2 0 6 7 0 4 0
# 0 4 1 2 2 0 6 7 0 0 0
#
#
# 0 3 6 0 0 0 2 9 1 4 5 2
# 0 7 3 8 5 2 0 0 9 3 8 5
# 0 4 1 2 2 0 1 8 9 0 4 5
# 0 3 7 0 0 0 2 0 2 1 4 1
# 7 6 5 6 6 8 2 0 2 0 2 8
# 0 4 1 2 2 0 6 7 0 4 0 6
# 0 4 1 2 2 0 6 7 0 0 0 0
#
import sys
print ("Enter number of lines. Then 11 digits for each line.")
count = int(sys.stdin.readline())
while (count > 0):
count -= 1
line = sys.stdin.readline().rstrip('\n')
currentDigit=1
checkDigit=0
for c in line:
if (c.isdigit()):
value = int(c)
checkDigit += value
if (currentDigit % 2 == 1):
checkDigit += value+value # Add odd positions a total of 3 times.
currentDigit += 1
checkDigit = checkDigit % 10
print (line, (10-checkDigit)%10)
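# A compact re-statement of the algorithm, added for illustration and checked
# against the first sample UPC above (0 3 6 0 0 0 2 9 1 4 5 -> 2): the odd
# positions sum to 14, times 3 gives 42, the even positions add 16 for 58,
# and (10 - 58 % 10) % 10 == 2.
def upc_check_digit(digits):
    odd = sum(digits[0::2])   # 1st, 3rd, 5th, ... positions
    even = sum(digits[1::2])  # 2nd, 4th, ... positions
    return (10 - (3 * odd + even) % 10) % 10

assert upc_check_digit([0, 3, 6, 0, 0, 0, 2, 9, 1, 4, 5]) == 2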
|
apache-2.0
| -2,976,268,853,443,760,600
| 34.984615
| 91
| 0.666524
| false
| 3.289733
| false
| false
| false
|
shpakoo/YAP
|
StepsLibrary.py
|
1
|
60173
|
########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## A library of "steps" or program wrappers to construct pipelines
## Pipeline steps orchestration, grid management and output handling.
#################################################
import sys, tempfile, shlex, glob, os, stat, hashlib, time, datetime, re, curses
from threading import *
from subprocess import *
from MothurCommandInfoWrapper import *
from collections import defaultdict
from collections import deque
from random import *
from Queue import *
import smtplib
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
_author="Sebastian Szpakowski"
_date="2012/09/20"
_version="Version 2"
#################################################
## Classes
##
class BufferedOutputHandler(Thread):
def __init__(self, usecurses=False):
Thread.__init__(self)
self.shutdown=False
self.cache = deque()
self.registered=0
self.ids = list()
self.wrap = 140
self.starttime = time.time()
#### init log
try:
self.otptfile = open("logfile.txt", 'a')
self.toPrint("-----", "GLOBAL", "Appending to a logfile.txt...")
except:
self.otptfile = open("logfile.txt", 'w')
self.toPrint("-----", "GLOBAL", "Creating a new logfile.txt...")
command = " ".join(sys.argv)
self.otptfile.write("command: %s\n" % command)
#### init output (curses)
self.usecurses = usecurses
if (self.usecurses):
self.stdscr=curses.initscr()
curses.savetty()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
self.textbuffer= list()
self.stdscr.clear()
self.stdscr.refresh()
self.cursestrackbuffer = 100
self.scrollpad = curses.newpad(self.cursestrackbuffer*2, self.wrap*2)
self.spacerpad = curses.newpad(1,1000)
self.updatepad = curses.newpad(10,1000)
self.rows, self.cols = self.stdscr.getmaxyx()
else:
self.stdscr=None
self.start()
def run(self):
self.toPrint("-----", "GLOBAL", "Setting up the pipeline...")
self.flush()
time.sleep(5)
while activeCount()>3 or self.registered>0 or len(self.cache) > 0:
self.flush()
time.sleep(1)
self.flush()
endtime = time.time()
text = "+%s [fin]" % (str(datetime.timedelta(seconds=round(endtime-self.starttime,0))).rjust(17))
self.toPrint("-----", "GLOBAL", text)
command = "%spython %straverser.py" % (binpath, scriptspath)
p = Popen(shlex.split(command), stdout = PIPE, stderr = PIPE, close_fds=True)
dot, err = p.communicate()
p.wait()
x = open("workflow.dot", "w")
x.write(dot)
x.write("\n")
x.close()
for format in ["svg", "svgz", "png", "pdf"]:
command = "%sdot -T%s -o workflow.%s" % (dotpath, format, format)
p = Popen(shlex.split(command), stdin = PIPE, stdout = PIPE, stderr = PIPE, close_fds=True)
out, err = p.communicate(dot)
p.wait()
		self.toPrint("-----", "GLOBAL", "Check out workflow.{svg,png,pdf} for an overview of what happened.")
self.flush()
self.otptfile.close()
self.closeDisplay()
self.mailLog()
def register(self, id):
self.registered+=1
self.ids.append(id)
def deregister(self):
self.registered-=1
def collapseIDs(self, text ):
for id in self.ids:
if len(id)>5:
text = re.sub(id, "[{0}~]".format(id[:5]), text)
return (text)
def flush(self):
while len(self.cache) > 0:
id, name, line = self.cache.popleft()
tag = "[{0}] {1:<20} > ".format( id[:5], name)
line = "{0!s}".format(line)
line = self.collapseIDs(line)
otpt = "{0}{1}".format(tag, line[:self.wrap])
self.otptfile.write("{0}{1}\n".format(tag, line))
line = line[self.wrap:]
self.outputScroll(otpt)
while len(line)>=self.wrap:
otpt = "{0}\t{1}".format(tag, line[:self.wrap])
line = line[self.wrap:]
self.outputScroll(otpt)
if len(line)>0:
otpt = "{0:<30}\t\t{1}".format("", line)
line = line
self.outputScroll(otpt)
self.redrawScreen()
def mailLog(self):
log = loadLines("logfile.txt")
log.reverse()
paths = os.getcwd()
paths = "%s/" % (paths)
dirs = glob.glob("*OUTPUT*")
dirs.sort()
for d in dirs:
paths = "%s\n\t%s/*" % (paths, d)
header = "Hi,\nYAP has just finished. Most, if not all, of your data should be in:\n\n%s\n\n-see the log below just to make sure...\nThe attached work-flow graph can be opened in your browser.\nYours,\n\n~YAP" % (paths)
log = "".join(log)
msgtext = "%s\n\n<LOG>\n\n%s\n</LOG>\n\n" % (header, log)
try:
me = __email__
toaddr = [me]
msg = MIMEMultipart()
msg['To'] = COMMASPACE.join(toaddr)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = '[AUTOMATED] YAP is done.'
if me != __admin__:
ccaddr = [__admin__]
msg['BCC'] = COMMASPACE.join(ccaddr)
toaddr = toaddr + ccaddr
msg.attach(MIMEText(msgtext))
files = ["workflow.pdf"]
for f in files:
try:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
except:
pass
s = smtplib.SMTP('mail.jcvi.org')
s.sendmail(me, toaddr , msg.as_string())
s.quit()
except:
pass
def redrawScreen(self):
try:
y,x = self.stdscr.getmaxyx()
### enough screen to print:
if y>20 and x>20:
if len(self.textbuffer) < (y-10):
self.scrollpad.refresh(0, 0, 0, 0, y-10, x-5)
else:
self.scrollpad.refresh(self.cursestrackbuffer-y+10 , 0, 0, 0, y-10, x-5)
self.updatepad.refresh(0, 0, y-8, 10 , y-3, x-5)
### when screen too small
else:
self.scrollpad.refresh(0,0,0,0,0,0)
self.updatepad.refresh(0,0,0,0,0,0)
except:
self.closeDisplay()
self.usecurses=False
#
def toPrint(self, id, name, line):
self.cache.append((id, name, line))
def outputScroll(self, k):
if self.usecurses:
self.textbuffer.append("%s\n" %(k))
self.scrollpad.clear()
for k in self.textbuffer[-self.cursestrackbuffer:]:
self.scrollpad.addstr(k)
else:
print k
def outputUpdate(self,k):
if self.usecurses:
self.updatepad.clear()
for k in k.strip().split("\n"):
self.updatepad.addstr("%s\n" % k)
def closeDisplay(self):
if self.usecurses:
self.stdscr.clear()
self.stdscr.refresh()
curses.curs_set(1)
curses.nocbreak()
curses.echo()
curses.resetty()
curses.endwin()
class TaskQueueStatus(Thread):
def __init__(self, update=1, maxnodes=10):
Thread.__init__(self)
self.active=True
self.maxnodes = maxnodes
self.available = self.maxnodes
self.update = update
#### queue of grid jobs to run
self.scheduled = Queue()
#### to keep track of things popped off the queue
self.processing = dict()
#### inventory of what ran
#### tuple (jid, status) indexed by command
#### status: new/running/done/remove
#### new upon registering
#### running when submitted to the grid
#### done when completed
self.registered = dict()
#### inventory of completed jobs
self.bestqueue = "default.q"
self.pollqueues()
self.running=0
self.stats=dict()
self.previous =""
self.start()
def run(self):
BOH.outputUpdate("Setting up the grid...")
print "Setting up grid..."
time.sleep(5)
while activeCount()>3 or self.running>0 or self.scheduled.qsize()>0:
self.pollfinished()
self.pollqueues()
self.pollrunning()
self.dispatch()
self.cleanup()
BOH.outputUpdate("%s" % (self))
#print self
time.sleep(self.update)
BOH.outputUpdate("%s\nGrid Offline." % (self))
print self
print "Queue status shutting down."
def cleanup(self):
toremove = set()
for key, tup in self.registered.items():
id, status = tup
if status == "remove":
toremove.add(key)
for key in toremove:
del self.registered[key]
def flagRemoval(self, task):
id, status = self.registered[task.getUniqueID()]
if status =="done":
self.registered[task.getUniqueID()] = [id, "remove"]
else:
print "cannot flag yet:", id, status
def pollfinished(self):
# donejobs = set()
#
# ### only 100 recent jobs shown, which could be a problem ;-)
# p = Popen(shlex.split("qstat -s z"), stdout=PIPE, stderr=PIPE, close_fds=True)
# p.wait()
# out,err = p.communicate()
#
# lines = out.split("\n")
# tmp = set()
# if len(lines)>2:
# for line in lines[2:]:
# line = line.strip().split()
# if len(line)>0:
# donejobs.add(line[0])
#
#if len(donejobs)>0:
for key, tup in self.registered.items():
id, status = tup
#if (status == "running") and (id in donejobs):
if (status == "running") and (self.isJobDone(id)):
tmp = self.registered[key][1]= "done"
self.processing[key].setCompleted()
self.available += 1
del self.processing[key]
def isJobDone(self, jid):
p = Popen(shlex.split("qstat -j %s" % jid), stdout=PIPE, stderr=PIPE, close_fds=True)
p.wait()
out,err = p.communicate()
return err.find("jobs do not exist")>-1
def pollqueues(self):
command="qstat -g c"
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, close_fds=True )
p.wait()
out,err = p.communicate()
if err.find("neither submit nor admin host")==-1:
queues = defaultdict(float)
out = out.strip().split("\n")
fullqueues = set()
#cache queue information
for q in out[2:]:
queue, cqload, used, res, avail, total, acds, cdsu = q.split()
avail = float(avail)
total = float(total)
if total>0:
queues[queue] = avail
if avail==0:
fullqueues.add(queue)
# determine which queue is the best
#for k in ("default.q", "medium.q", "fast.q", "himem.q"):
#for k in ("fast.q", "medium.q", "default.q"):
#for k in ("himem.q", "medium.q", "default.q"):
if ("medium.q" in fullqueues) and ("default.q" in fullqueues) and "himem" in queues.keys() :
if queues["himem.q"]>0:
self.bestqueue = "himem.q"
else:
self.bestqueue = "medium.q"
else:
for k in ("medium.q", "default.q"):
if queues[k] >= queues[self.bestqueue]:
self.bestqueue = k
### sanity check, this should match the counters
def pollrunning(self):
tmp=defaultdict(int)
for jid, value in self.registered.values():
tmp[value]+=1
self.stats = tmp
self.running = self.stats["running"]
def dispatch(self):
while self.nodesAvailable():
if not self.scheduled.empty():
tmp = self.scheduled.get()
self.processing[tmp.getUniqueID()]=tmp
#print "submitting", tmp.getUniqueID()
jid = tmp.submit()
#print jid
if jid==-1:
print "???", tmp
self.registered[tmp.getUniqueID()] = [tmp.getGridId(), "running"]
self.available-=1
else:
break
def pickQ(self):
return self.bestqueue
def register(self, task):
self.scheduled.put(task)
self.registered[task.getUniqueID()]=[-1, "new"]
def shutdown(self):
self.active=False
print "Queue status shutting down..."
def nodesAvailable(self):
return (self.available > 0)
def __str__(self):
otpt ="Currently running/waiting: %s/%s\n" % (self.running, self.scheduled.qsize())
otpt ="%savailable/total: %s/%s" % (otpt, self.available, self.maxnodes)
# for key, tup in self.registered.items():
# id, status = tup
# if id != -1:
# otpt = "%s\n\t%s\t%s\t%s" % (otpt, id, status, key[0:10])
for key, val in self.stats.items():
otpt = "%s\n\t%s\t%s" % (otpt, key, val)
otpt = "%s\n\nbest queue: %s" % (otpt, self.bestqueue)
return (otpt)
#################################################
### a thread that keeps track of a qsub job
### templates adapted to JCVI's grid
###
class GridTask():
def __init__(self, template="default.q", command = "", name="default", cpu="1", dependson=list(), cwd=".", debug=False):
self.gridjobid=-1
self.completed=False
self.queue=template
self.inputcommand = command
self.cwd=cwd
self.project = __projectid__
self.email = __email__
### remove *e##, *pe## *o## *po##
self.retainstreams=" -o /dev/null -e /dev/null "
### debug flag
self.debugflag = debug
### the only queue that has more than 4 CPUs...
if int(cpu)>4:
self.queue = "himem.q"
if len(dependson)>0:
holdfor = "-hold_jid "
for k in dependson:
holdfor = "%s%s," % (holdfor, k.getJobid())
holdfor=holdfor.strip(",")
else:
holdfor = ""
### keep po pe o e streams for debugging purposes
if self.debugflag:
self.retainstreams=""
### to avoid long command problems, create a script with the command, and invoke that instead of the command directyly.
px = "tmp.%s.%s.%s.%s." % (randrange(1,100),randrange(1,100),randrange(1,100),randrange(1,100))
sx = ".%s.%s.%s.%s.sh" % (randrange(1,100),randrange(1,100),randrange(1,100),randrange(1,100))
##### to avoid too many opened files OSError
pool_open_files.acquire()
### bounded semaphore should limit throttle the files opening for tasks created around the same time
scriptfile, scriptfilepath = tempfile.mkstemp(suffix=sx, prefix=px, dir=self.cwd, text=True)
os.close(scriptfile)
self.scriptfilepath = scriptfilepath
os.chmod(self.scriptfilepath, 0777 )
input= "%s\n" % (self.inputcommand)
scriptfile = open(self.scriptfilepath, "w")
scriptfile.write(input)
scriptfile.close()
pool_open_files.release()
####
self.templates=dict()
self.templates["himem.q"] = 'qsub %s -P %s -N jh.%s -cwd -pe threaded %s -l "himem" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["default.q"] = 'qsub %s -P %s -N jd.%s -cwd -pe threaded %s -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["fast.q"] = 'qsub %s -P %s -N jf.%s -cwd -pe threaded %s -l "fast" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name,cpu, self.email, holdfor, self.scriptfilepath)
self.templates["medium.q"] = 'qsub %s -P %s -N jm.%s -cwd -pe threaded %s -l "medium" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, cpu, self.email, holdfor, self.scriptfilepath)
self.templates["himemCHEAT"] = 'qsub %s -P %s -N jH.%s -cwd -pe threaded %s -l "himem" -M %s -m a %s "%s" ' % (self.retainstreams, self.project, name, 1, self.email, holdfor, self.scriptfilepath)
self.templates["mpi"] = 'qsub %s -P %s -N jP.%s -cwd -pe orte %s -M %s -m a %s mpirun -np %s "%s" ' % (self.retainstreams, self.project, name, cpu, cpu, self.email, holdfor, self.scriptfilepath )
self.command = ""
QS.register(self);
def submit(self):
if not self.queue in self.templates.keys():
self.queue = QS.pickQ()
self.command = self.templates[self.queue]
#print self.command
p = Popen(shlex.split(self.command), stdout=PIPE, stderr=PIPE, cwd=self.cwd, close_fds=True)
p.wait()
out, err = p.communicate()
err = err.strip()
out = out.strip()
if err!="":
print err
if out.endswith("has been submitted"):
self.gridjobid = out.split(" ")[2]
else:
print ">>>", out
print "#FAIL"
return (self.getGridId())
def getGridId(self):
return self.gridjobid
def getUniqueID(self):
return "%s_%s_%s" % (id(self), self.cwd, self.inputcommand)
def setCompleted(self):
self.completed=True
try:
if not self.debugflag:
os.remove(self.scriptfilepath)
except OSError, error:
print( "%s already gone" % self.scriptfilepath)
QS.flagRemoval(self)
def isCompleted(self):
return (self.completed)
def wait(self):
while not self.isCompleted():
time.sleep(0.1)
#################################################
### Iterator over input fasta file.
### Only reading when requested
### Useful for very large FASTA files
### with many sequences
class FastaParser:
def __init__ (self, x):
self.filename = x
self.fp = open(x, "r")
self.currline = ""
self.currentFastaName = ""
self.currentFastaSequence = ""
self.lastitem=False
def __iter__(self):
return(self)
#####
def next(self):
for self.currline in self.fp:
if self.currline.startswith(">"):
self.currline = self.currline[1:]
if self.currentFastaName == "":
self.currentFastaName = self.currline
else:
otpt = (self.currentFastaName.strip(), self.currentFastaSequence.strip())
self.currentFastaName = self.currline
self.currentFastaSequence = ""
self.previoustell = self.fp.tell()
return (otpt)
else:
self.addSequence(self.currline)
if not self.lastitem:
self.lastitem=True
return (self.currentFastaName.strip(), self.currentFastaSequence.strip())
else:
raise StopIteration
def addSequence(self, x):
self.currentFastaSequence = "%s%s" % (self.currentFastaSequence, x.strip())
	def __str__(self):
return ("reading file: %s" %self.filename)
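### Minimal usage sketch (illustrative only; "reads.fasta" is a hypothetical path):
###   for head, seq in FastaParser("reads.fasta"):
###       print head, len(seq)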
#################################################
### Iterator over input file.
### every line is split on the separator and returned as a list of fields
class GeneralPurposeParser:
def __init__(self, file, skip=0, sep="\t"):
self.filename = file
self.fp = open(self.filename, "rU")
self.sep = sep
self.skip = skip
self.linecounter = 0
self.currline=""
while self.skip>0:
self.next()
self.skip-=1
def __iter__(self):
return (self)
def next(self):
otpt = dict()
for currline in self.fp:
currline = currline.strip().split(self.sep)
self.currline = currline
self.linecounter = self.linecounter + 1
return(currline)
raise StopIteration
def __str__(self):
return "%s [%s]\n\t%s" % (self.filename, self.linecounter, self.currline)
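### Minimal usage sketch (illustrative only; "mapping.tsv" is a hypothetical path):
###   for fields in GeneralPurposeParser("mapping.tsv", skip=1, sep="\t"):
###       print fields[0]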
#################################################
### The mother of all Steps:
###
class DefaultStep(Thread):
def __init__(self):
#### thread init
Thread.__init__(self)
self.random = uniform(0, 10000)
self.name = ("%s[%s]" % (self.name, self.random))
#### hash of the current step-path (hash digest of previous steps + current inputs + arguments?)
self.workpathid = ""
#### path where the step stores its files
self.stepdir = ""
#### what needs to be completed for this step to proceed
#### a list of steps
self.previous = list()
#### mapping type - path for files
self.inputs = defaultdict(set)
#### mapping type - name for files
self.outputs = defaultdict(set)
#### mapping arg val for program's arguments
self.arguments= dict()
#### ID of the step...
self.stepname = ""
#### flag for completion
self.completed = False
self.completedpreviously=False
self.failed = False
#### keep track of time elapsed
self.starttime = 0
self.endtime = 0
#### special flag, some steps might not want to delete the inputs (argcheck)
self.removeinputs = True
####
def setInputs(self, x):
for k,v in x.items():
for elem in v:
self.inputs[k].add(elem)
def setArguments(self, x):
for k,v in x.items():
if v=="":
v=" "
self.arguments[k] = v
def setPrevious(self, x):
if not type(x) is list:
self.previous.append(x)
else:
for elem in x:
self.previous.append(elem)
def setName(self, x):
self.stepname=x
def run(self):
self.init()
if self.failed:
#self.message("Error detected... ")
BOH.deregister()
self.completed=True
elif not self.isDone():
try:
self.performStep()
self.finalize()
except Exception, inst:
self.message("...")
self.message( type(inst))
self.message( inst)
BOH.deregister()
self.completed=True
self.failed=True
else:
self.message("Completed (previously).")
BOH.deregister()
self.completed=True
self.completedpreviously=True
	def performStep(self):
self.message("in a step...")
def init(self):
redo=False
### wait for previous steps to finish
for k in self.previous:
while not k.isDone():
#self.message( "waiting" )
time.sleep(1)
if k.hasFailed():
self.failed=True
redo=redo or (not k.isDonePreviously())
#self.message("needs a redo %s" % (redo))
if not self.failed:
### time stamp
self.starttime = time.time()
#### hash of the current step-path (hash digest of previous steps + current inputs + arguments?)
self.workpathid = self.makeWorkPathId()
####
### output handler
BOH.register(self.workpathid)
###
#self.message("Initializing %s %s" % (self.workpathid, self.name))
#### create directories if necessary
self.stepdir =""
self.prepareDir(redo=redo)
def makeWorkPathId(self):
tmp = list()
tmp.append(self.stepname)
if self.previous!=None:
for k in self.previous:
while k.getWorkPathId()==-1:
					time.sleep(1)
tmp.extend([k.getWorkPathId()])
for k,v in self.inputs.items():
tmp.extend(["%s=%s" % (k, ",".join(v) ) ] )
for k,v in self.arguments.items():
tmp.extend(["%s=%s" % (k, v) ] )
tmp.sort()
tmp = "\n".join(tmp)
workpathid = hashlib.sha224(tmp).hexdigest()[0:5]
return (workpathid)
def getWorkPathId(self):
return (self.workpathid)
def prepareDir(self, redo=False):
### make step's directory
self.stepdir = "Step_%s_%s" % (self.stepname, self.workpathid)
flush_old = False
try:
os.mkdir(self.stepdir)
except OSError, error:
self.message( "Step directory already exists...")
flush_old=True
if redo:
if flush_old:
self.message("Updating...")
k = "rm -r *"
task = GridTask(template="pick", name="redo_clean", command=k, cpu=1, cwd = self.stepdir)
task.wait()
else:
###supposedly no old step-data to flush
pass
else:
### has analysis been done already?
try:
self.parseManifest()
self.completed=True
self.completedpreviously=True
self.message("Using data generated previously...")
except IOError, inst:
#self.message("Will make new manifest...")
pass
except Exception, inst:
self.message("****ERROR***")
self.message(type(inst))
self.message(inst.args)
self.message(inst)
self.message("************")
def finalize(self):
if not self.failed:
self.categorizeAndTagOutputs()
self.makeManifest()
self.endtime = time.time()
self.message( "+%s\t[Done]" % (str(datetime.timedelta(seconds=round(self.endtime-self.starttime,0))).rjust(17)) )
else:
self.endtime = time.time()
self.message( "+%s\t[Fail]" % (str(datetime.timedelta(seconds=round(self.endtime-self.starttime,0))).rjust(17)) )
self.completed=True
BOH.deregister()
def makeManifest(self):
m = open("%s/%s.manifest" % (self.stepdir, self.workpathid), "w")
for type, files in self.inputs.items():
if len(files)>0:
m.write("input\t%s\t%s\n" % (type, ",".join(files)) )
for arg, val in self.arguments.items():
m.write("argument\t%s\t%s\n" % (arg, val ) )
for type, files in self.outputs.items():
if len(files)>0:
m.write("output\t%s\t%s\n" % (type, ",".join(files)) )
m.close()
def determineType(self, filename):
filename = filename.strip().split(".")
extension = filename[-1]
preextension = filename[-2]
if preextension == "scrap":
return "scrap"
elif preextension == "align" and extension == "report":
return "alignreport"
elif extension == "dist" and preextension == "phylip":
return "phylip"
elif extension == "dist":
return "column"
elif preextension == "tax" and extension =="summary":
return "taxsummary"
elif preextension == "cdhit" and extension =="clstr":
return "cdhitclstr"
elif preextension == "bak" and extension =="clstr":
return "cdhitbak"
elif extension == "cdhit":
return "fasta"
elif extension in ["align", "fna", "fa", "seq", "aln"]:
return "fasta"
elif extension == "qual":
return "qfile"
elif extension == "tax":
return "taxonomy"
elif extension == "names":
return "name"
elif extension == "groups":
return "group"
elif extension == "files":
return "file"
elif extension in ["tre", "tree", "dnd"]:
return "tre"
### sge job files
elif re.search("po\d{3}", extension) != None:
return "po"
elif re.search("pe\d{3}", extension) != None:
return "pe"
elif re.search("o\d{3}", extension) != None:
return "o"
elif re.search("e\d{3}", extension) != None:
return "e"
else:
return extension
def categorizeAndTagOutputs(self):
inputs = [x.split("/")[-1] for x in unlist( self.inputs.values()) ]
for file in glob.glob("%s/*" % self.stepdir):
file = file.split("/")[-1]
if file in inputs:
if self.removeinputs:
command = "unlink %s" % (file)
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
out,err = p.communicate()
p.wait()
else:
self.message("kept %s" % file)
#pass
elif not file.endswith("manifest"):
#self.message( "output: %s" % (file))
### make sure that every output file except for the manifest starts with the workpathID
file = file.split(".")
if len(file[0]) == len(self.workpathid):
newfilename = "%s.%s" % (self.workpathid, ".".join(file[1:]))
else:
newfilename = "%s.%s" % (self.workpathid, ".".join(file[0:]))
if ".".join(file) != newfilename:
k="mv %s %s" % (".".join(file), newfilename)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
out,err = p.communicate()
p.wait()
self.outputs[self.determineType(newfilename)].add(newfilename)
def find(self, arg, ln=True, original=False):
files=list()
if not original:
if len(self.inputs[arg])==0:
tmp = {arg: self.getOutputs(arg)}
self.setInputs(tmp)
else:
tmp = {arg: self.getOriginal(arg)}
self.setInputs(tmp)
files = self.inputs[arg]
toreturn=list()
for file in files:
if self.isVar(file):
toreturn.append(file[5:])
else:
tmp = file.strip().split("/")[-1]
if (ln):
command = "cp -s %s %s" % (file, tmp )
else:
command = "cp %s %s" % (file, tmp )
p = Popen(shlex.split(command), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True )
out,err = p.communicate()
p.wait()
toreturn.append(tmp)
#unique
toreturn = set(toreturn)
return list(toreturn)
def isVar(self,x):
return x.startswith("[var]")
def getOutputs(self, arg):
if self.outputs.has_key(arg):
otpt = list()
for x in unlist(self.outputs[arg]):
if self.isVar(x):
otpt.append(x)
else:
otpt.append("../%s/%s" % (self.stepdir, x))
return otpt
elif self.previous!=None:
otpt = list()
for k in self.previous:
otpt.extend(k.getOutputs(arg))
return otpt
else:
return list()
def getOriginal(self, arg):
if self.previous == None:
return self.getOutputs(arg)
else:
current = self.getOutputs(arg)
otpt = list()
for k in self.previous:
otpt.extend(k.getOriginal(arg))
if len(otpt)>0:
return otpt
else:
return current
def parseManifest(self):
fp = open("%s/%s.manifest" % (self.stepdir, self.workpathid), "r")
lines=fp.readlines()
fp.close()
for line in lines:
line = line.strip("\n").split("\t")
if line[0] == "output":
type = line[1]
files = line[2].split(",")
for file in files:
self.outputs[type].add(file)
elif line[0] == "input":
type = line[1]
files = line[2].split(",")
for file in files:
self.inputs[type].add(file)
elif line[0] == "argument":
if len(line)==2:
self.arguments[line[1]] = " "
else:
self.arguments[line[1]]=line[2]
def message(self, text):
if type(text) == list:
for line in text:
BOH.toPrint(self.workpathid, self.stepname, line)
else:
BOH.toPrint(self.workpathid, self.stepname, text)
def isDone(self):
return self.completed
def isDonePreviously(self):
return self.completedpreviously
def hasFailed(self):
return self.failed
def getInputValue(self, arg):
if self.arguments.has_key(arg):
return self.arguments[arg]
else:
return None
def setOutputValue(self, arg, val):
self.outputs[arg] = ["[var]%s" % (val)]
def __str__(self):
otpt = "%s\t%s" % (self.stepname, self.name)
for val in self.previous:
			otpt = "%s\n%s" % (otpt, val.__str__())
#otpt = "\n".join(set(otpt.strip().split("\n")))
return otpt
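### Illustrative sketch of chaining steps into a small pipeline (the file path, group
### id and mothur command are hypothetical; every step is a thread that starts on
### construction and blocks until the steps listed in PREV have finished):
###   imports = FileImport({"fasta": ["/path/to/reads.fasta"]})
###   names   = MakeNamesFile([imports])
###   groups  = MakeGroupsFile([imports], "sample01")
###   summary = MothurStep("summary.seqs", 4, dict(), dict(), [imports, names, groups])
###   OutputStep("SUMMARY", "summary", [summary])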
class FileImport(DefaultStep):
def __init__(self, INS):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
#self.setPrevious(PREV)
self.setName("FILE_input")
self.start()
def performStep(self):
for type in self.inputs.keys():
files = self.inputs[type]
for file in files:
pool_open_files.acquire()
file = file.split("~")
if len(file)>1:
file, newname = file
tmp = file.strip().split("/")[-1]
k = "cp %s %s.%s" % (file, newname, type)
else:
file = file[0]
tmp = file.strip().split("/")[-1]
k ="cp %s imported.%s" % (file, tmp)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
self.message(k)
out,err = p.communicate()
p.wait()
pool_open_files.release()
class ArgumentCheck(DefaultStep):
def __init__(self, SHOW, PREV):
ARGS = {"show":SHOW}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ArgCheck")
#self.nodeCPUs=nodeCPUs
self.removeinputs=False
self.start()
def performStep(self):
x = self.getInputValue("show")
if x!=None:
for type in x.split(","):
for file in self.find(type):
self.message("%s: %s" % (type,file))
class OutputStep(DefaultStep):
def __init__(self, NAME, SHOW, PREV):
ARGS = {"show":SHOW}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("OUTPUT_%s" % (NAME))
#self.nodeCPUs=nodeCPUs
self.removeinputs=False
self.start()
def performStep(self):
x = self.getInputValue("show")
if x!=None:
for type in x.split(","):
for file in self.find(type.strip(), ln = False):
self.message("%s: %s" % (type,file))
class SFFInfoStep(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("sffinfo")
self.start()
def performStep(self):
steps = list()
for sff in self.find("sff"):
k = "/usr/local/bin/sffinfo -s %s > %s.fasta" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
k = "/usr/local/bin/sffinfo -q %s > %s.qual" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
k = "/usr/local/bin/sffinfo -f %s > %s.flow" % (sff, sff)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
steps.append(task)
for s in steps:
s.wait()
class MothurStep(DefaultStep):
def __init__(self, NM, nodeCPUs, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName(NM)
self.nodeCPUs=nodeCPUs
self.start()
def makeCall(self):
FORCE = self.getInputValue("force")
x = MOTHUR.getCommandInfo(self.stepname)
#self.message(self.inputs)
if FORCE != None :
TYPES = FORCE.strip().split(",")
else:
TYPES = x.getInputs()
mothurargs = list()
for TYPE in TYPES:
#### on occasion, mothur overwrites the original file - namely names file
#### FALSE creates a copy
#### TRUE creates a link
if TYPE=="name":
tmp = self.find(TYPE, False)
else:
tmp = self.find(TYPE, True)
if len(tmp)>0:
mothurargs.append ("%s=%s" % (TYPE, "-".join(tmp)))
else:
if x.isRequired(TYPE):
self.message("Required argument '%s' not found!" % (TYPE))
raise Exception
else:
self.message("Optional argument '%s' not found, skipping" % (TYPE))
for arg, val in self.arguments.items():
if x.isAnArgument(arg):
mothurargs.append("%s=%s" % (arg, val))
elif arg=="find":
for a in val.strip().split(","):
self.message(a)
valstoinsert = self.find(a)
self.message(valstoinsert)
if len(valstoinsert)>0:
mothurargs.append("%s=%s" % (a, "-".join(valstoinsert)))
else:
self.message("skipping '%s' - not found" % (a))
else:
self.message("skipping '%s', as it is not an argument for %s" % (arg, self.stepname))
### method is parallelizable,
if x.isAnArgument("processors") and "processors" not in self.arguments.keys():
mothurargs.append("%s=%s" % ("processors", self.nodeCPUs ))
self.message("Will run on %s processors" % (self.nodeCPUs))
himemflag=False
### steps requiring lots of memory
if self.stepname in ("clearcut", "align.seq"):
himemflag=True
self.message("Needs lots of memory")
command = "%s(%s)" % (self.stepname, ", ".join(mothurargs))
return (command, x.isAnArgument("processors"), himemflag)
def performStep(self):
call, parallel, himem = self.makeCall()
k = "%smothur \"#%s\"" % (mothurpath, call)
if self.stepname =="remove.groups" and k.find("groups=)")>-1:
self.message("no groups to remove.")
else:
self.message(k)
if (parallel and self.nodeCPUs>1):
#task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
#elif (himem):
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
else:
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
task.wait()
self.parseLogfile()
def parseLogfile(self):
for f in glob.glob("%s/*.logfile" % (self.stepdir)):
line = ""
for line in loadLines(f):
### UCHIME throws an error when it does not find chimeras, even though it completes.
if line.find ("ERROR")>-1 and line.find("uchime")==-1:
self.failed=True
### last line
if line.find("quit()")==-1:
self.failed=True
class MothurSHHH(DefaultStep):
def __init__(self, PREV, nodeCPUs):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MPyro")
####
####self.nodeCPUs=nodeCPUs
self.nodeCPUs=4
self.start()
def performStep(self):
tasks = list()
TOC = self.find("file")
flows = self.find("flow")
TOC = loadLines("%s/%s" % (self.stepdir, TOC[0]))
TOC = [ ".".join(x.strip().split(".")[1:]) for x in TOC]
# for f in flows:
# tmp = ".".join(f.split(".")[1:])
#
# if tmp in TOC:
#
# ### split tmp into 10,000 lines chunks
# k = "split -l 7000 -a 3 %s %s.split." % (f, f)
# task = GridTask(template="pick", name="MPyroSplit", command=k, cpu=1, cwd = self.stepdir, debug=False)
# tasks.append(task)
# else:
# self.message("skipping %s" % (f))
#
# self.message("splitting %s file(s)" % len(tasks))
#
# for task in tasks:
# task.wait()
################################################
tasks = list()
#for chunk in glob.glob("%s/*.split.*" % (self.stepdir)):
# chunk = chunk.split("/")[-1]
#self.message(chunk)
# call = "shhh.flows(flow=%s, processors=%s, maxiter=100, large=10000)" % (chunk, self.nodeCPUs)
for f in flows:
tmp = ".".join(f.split(".")[1:])
if tmp in TOC:
call = "shhh.flows(flow=%s, processors=%s, maxiter=100, large=10000)" % (f, self.nodeCPUs)
k = "%smothur \"#%s\"" % (mothurpath, call)
self.message(k)
task = GridTask(template="pick", name="Mpyro", command=k, cpu=self.nodeCPUs, cwd = self.stepdir, debug=True)
tasks.append(task)
if len(tasks)==0:
self.failed=True
self.message("processing %s file(s)" % len(tasks))
for task in tasks:
task.wait()
class LUCYcheck(DefaultStep):
def __init__(self, nodeCPUs, PREV):
DefaultStep.__init__(self)
self.nodeCPUs=nodeCPUs
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("LUCY_check")
self.nodeCPUs=nodeCPUs
if self.nodeCPUs>32:
self.nodeCPUs=30
self.start()
def performStep(self):
f = self.find("fasta")[0]
q = self.find("qfile")[0]
statinfo = os.stat("%s/%s" % (self.stepdir, f))
#self.message(statinfo.st_size)
if statinfo.st_size==0:
self.message("%s is empty." % f)
self.failed=True
else:
k ="%s/lucy -error 0.002 0.002 -bracket 20 0.002 -debug -xtra %s -output %s.fastalucy %s.qfilelucy %s %s" % (binpath, self.nodeCPUs, f,q, f,q)
self.message(k)
if self.nodeCPUs>2:
task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
else:
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir)
task.wait()
class LUCYtrim(DefaultStep):
def __init__(self, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("LUCY_trim")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("fastalucy")[0]
q = self.find("qfilelucy")[0]
k = "%spython %s/fastAtrimmer.py -l %s %s %s " % (binpath, scriptspath, f.split(".")[0], f, q)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class MatchGroupsToFasta(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MatchGroups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("fasta")
f = f[0]
g = self.find("group")
g = g[0]
n = self.find("name")
if len(n)>0:
n = "-n %s" % (n[0])
else:
n = ""
k = "%spython %s/MatchGroupsToFasta.py %s -f %s -g %s -o %s.matched.group" % (binpath, scriptspath, n, f, g, ".".join(g.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class MatchGroupsToList(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MatchGroups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("list")
f = f[0]
g = self.find("group")
g = g[0]
k = "%spython %s/MatchGroupsToFasta.py -l %s -g %s -o %s.matched.group" % (binpath, scriptspath, f, g, ".".join(g.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
task.wait()
class FileMerger(DefaultStep):
def __init__(self, TYPES, PREV, prefix="files"):
ARGS = {"types": TYPES}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_cat")
#self.nodeCPUs=nodeCPUs
self.prefix = prefix
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
if len(files)>0 and len(files)<25:
k = "cat %s > %s.x%s.merged.%s" % (" ".join(files), self.prefix, len(files), t)
self.message(k)
task = GridTask(template="pick", name="cat", command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
elif len(files)>=25:
k = "cat *.%s* > %s.x%s.merged.%s" % (t, self.prefix, len(files), t)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
#else:
# self.failed=True
for task in tasks:
task.wait()
time.sleep(1)
class FileSort(DefaultStep):
def __init__(self, TYPES, PREV):
ARGS = {"types": TYPES}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_sort")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
if len(files)>0:
k = "sort -n %s > files_x%s.sorted.%s" % (" ".join(files), len(files), t)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
time.sleep(1)
class FileType(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_type")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for input, output in self.arguments.items():
files = self.find(input)
for file in files:
outname = "%s.%s" % (file, output)
k = "cp %s %s" % (file, outname)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
time.sleep(1)
class CleanFasta(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CleanFasta")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
f = self.find("fasta")
f = f[0]
k = "%spython %s/CleanFasta.py -i %s -o %s.dash_stripped.fasta" % (binpath, scriptspath,f, ".".join(f.split(".")[:-1]))
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir, debug=False)
task.wait()
class MakeNamesFile(DefaultStep):
def __init__(self, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_names")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files = self.find("fasta")
for f in files:
self.message("Creating 'names' file for sequences in {0}".format( f))
newname = f.strip().split(".")[:-1]
newname = "%s.names" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir,newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
head = head.strip().split()[0]
otpt.write("%s\t%s\n" % (head, head))
otpt.close()
if len(files)==0:
self.message("No files to generate NAMES...")
class MakeGroupsFile(DefaultStep):
def __init__(self, PREV, id):
ARGS = {"groupid": id}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_groups")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files = self.find("fasta")
for f in files:
id = self.getInputValue("groupid")
self.message("Creating 'groups' file; '{0}' for sequences in {1}".format(id, f))
newname = f.strip().split(".")[:-1]
newname = "%s.groups" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir, newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
head = head.strip().split()[0]
otpt.write("%s\t%s\n" % (head, id))
otpt.close()
if len(files)==0:
self.message("No files to generate GROUPS...")
class MakeQualFile(DefaultStep):
def __init__(self, PREV, q):
ARGS = {"qual": q}
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_qfile")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("fasta")[0]
q = self.getInputValue("qual")
self.message("Creating 'qual' file; '{0}' for sequences in {1}".format(q, f))
newname = f.strip().split(".")[:-1]
newname = "%s.qual" % (".".join(newname))
otpt = open("%s/%s" % (self.stepdir, newname ), 'w')
for head, seq in FastaParser("%s/%s" % (self.stepdir, f)):
otpt.write(">%s\n" % (head))
for k in seq:
otpt.write("%s " % (q))
otpt.write("\n")
otpt.close()
class AlignmentSummary(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentSummary")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
self.project = __projectid__
self.mailaddress = __email__
f = self.find("fasta")[0]
ref = self.getInputValue("ref")
if ref == None:
ref="e_coli2"
th = self.getInputValue("thresh")
if th == None:
th="0.1"
self.message("summarizing an alignment in %s" % (f) )
k = "%spython %s/alignmentSummary.py -P %s -M %s -t 500 -p %s -i %s -o %s.alsum -T %s -x %s" % (binpath, scriptspath, self.project, self.mailaddress, ref, f,f, th, binpath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=True)
task.wait()
for file in glob.glob("%s/*AlignmentSummary.o*"% (self.stepdir)):
x = loadLines(file)[-1].strip().split("\t")
self.message("Potential trimming coordinates: %s - %s [peak = %s] [thresh = %s]" % (x[1], x[3], x[5], x[7]) )
self.setOutputValue("trimstart", x[1])
self.setOutputValue("trimend", x[3])
self.setOutputValue("trimthresh", x[7])
#self.failed = True
class AlignmentPlot(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentPlot")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
f = self.find("alsum")[0]
ref = self.getInputValue("ref")
if ref == None:
ref="e_coli"
trimstart = self.getInputValue("trimstart")
if trimstart==None:
trimstart=0
elif trimstart=="find":
trimstart = self.find("trimstart")[0]
trimend = self.getInputValue("trimend")
if trimend == None:
trimend=0
elif trimend == "find":
trimend = self.find("trimend")[0]
trimthresh = self.getInputValue("trimthresh")
if trimthresh == None:
trimthresh=0
elif trimthresh == "find":
trimthresh = self.find("trimthresh")[0]
		self.message("Adding trimming marks at: %s - %s" % (trimstart, trimend))
tmp = open("%s/alsum.r" % (self.stepdir), "w")
tmp.write("source(\"%s/alignmentSummary.R\")\n" % (scriptspath))
tmp.write("batch2(\"%s\", ref=\"%s\", trimstart=%s, trimend=%s, thresh=%s )\n" % (f, ref, trimstart, trimend, trimthresh))
tmp.close()
k = "%sR CMD BATCH alsum.r" % (binpath)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
class GroupRetriever(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("GroupCheck")
self.start()
def performStep(self):
minimum = self.getInputValue("mingroupmembers")
if minimum==None:
minimum=0
group = self.find("group")[0]
groups = defaultdict(int)
otpt = open("{0}/{1}.groupstats".format(self.stepdir, group), "w")
for line in loadLines("%s/%s" % (self.stepdir, group)):
x = line.strip().split("\t")[1]
groups[x]+=1
keys = sorted(groups, key=groups.get)
keys.reverse()
passinggroups=list()
failinggroups = list()
for k in keys:
v = groups[k]
if v>=minimum:
flag="ok"
passinggroups.append(k)
else:
flag="x"
failinggroups.append(k)
self.message("{0:<25}:{1:>10}:{2}".format( k, v, flag))
otpt.write("{0}\t{1}\t{2}\n".format(k,v, flag))
otpt.close()
if len(passinggroups)==0:
self.message("There are not enough reads to analyze. See documentation for -g [currently set to {0}] and -x arguments.".format(minimum))
self.failed=True
if self.getInputValue("report") in [None, "passing"]:
groupnames = "-".join(passinggroups)
else:
groupnames = "-".join(failinggroups)
self.setOutputValue("groups", groupnames)
class CDHIT_454(DefaultStep):
def __init__(self, nodeCPUs, ARGS, PREV):
DefaultStep.__init__(self)
if ARGS.has_key("T"):
self.nodeCPUs = ARGS["T"]
else:
self.nodeCPUs=nodeCPUs
ARGS["T"]=self.nodeCPUs
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_454")
self.start()
def performStep(self):
args = ""
for arg, val in self.arguments.items():
args = "%s -%s %s" % (args, arg, val)
fs = self.find("fasta")
if len(fs)==0:
fs.extend(self.find("mate1"))
fs.extend(self.find("mate2"))
tasks=list()
for f in fs:
k ="%scd-hit-454 -i %s -o %s.cdhit %s" % (cdhitpath, f, f, args)
self.message(k)
task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=False)
# if self.nodeCPUs>2:
# task = GridTask(template=defaulttemplate, name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
# else:
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
tasks.append(task)
for task in tasks:
task.wait()
class CDHIT_EST(DefaultStep):
def __init__(self, nodeCPUs, ARGS, PREV):
DefaultStep.__init__(self)
if ARGS.has_key("T"):
self.nodeCPUs = ARGS["T"]
else:
self.nodeCPUs=nodeCPUs
ARGS["T"]=self.nodeCPUs
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_EST")
self.start()
def performStep(self):
f = self.find("fasta")[0]
args = ""
dist = 1
for arg, val in self.arguments.items():
args = "%s -%s %s" % (args, arg, val)
if arg == "c":
dist = dist - (float(val))
k ="%scd-hit-est -i %s -o %s._%s_.cdhit %s" % (cdhitpath, f, f, dist, args)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=False)
# if self.nodeCPUs>2:
# task = GridTask(template="defaulttemplate", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
# else:
# task = GridTask(template="himem.q", name=self.stepname, command=k, cpu=self.nodeCPUs, dependson=list(), cwd = self.stepdir, debug=True)
task.wait()
class CDHIT_Perls(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHITperls")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
x = self.find("cdhitclstr")
tasks=list()
for cluster in x:
k = "%sclstr2tree.pl %s > %s.tre" % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
k = "%sclstr_size_histogram.pl %s > %s.hist.tab.txt " % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
k = "%sclstr_size_stat.pl %s > %s.size.tab.txt" % (cdhitpath, cluster, cluster)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir, debug=False)
tasks.append(task)
for task in tasks:
task.wait()
class CDHIT_Mothurize(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CDHIT_Mothurise")
self.start()
def performStep(self):
### mode can be "name" or None (to produce a name file)
### mode can be "#.## (to produce a labeled ra/sa/list combo)
m = self.getInputValue("mode")
if m == None:
m = "name"
modeswitch = "-o %s" % (m)
### is there an optional names file?
n = self.find("name")
if len(n)>0:
n = n[0]
nameswitch = "-n %s" % (n)
else:
nameswitch = ""
### is there a required cluster file
clst = self.find("cdhitclstr")
if len(clst)>0:
k = "%spython %sCDHIT_mothurize_clstr.py -c %s %s %s" % (binpath, scriptspath, clst[0], nameswitch, modeswitch)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir, debug=False)
task.wait()
else:
self.failed=True
class R_defaultplots(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_plots")
self.start()
def performStep(self):
f = self.find("taxsummary")
anno = self.find("annotation")[0]
tasks = list()
script = open("%s/script.r" % (self.stepdir), "w")
script.write("""source("%sConsTaxonomyPlots.R")\n""" % (scriptspath))
for file in f:
dist = ".%s"% (self.getInputValue("dist"))
if file.find(dist)>-1 and file.find("seq")>-1 :
script.write("""makeDefaultBatchOfPlots("%s", "%s", fileprefix="SEQnum")\n""" % (anno, file))
elif file.find(dist)>-1 and file.find("otu")>-1 :
script.write("""makeDefaultBatchOfPlots("%s", "%s", fileprefix="OTUnum")\n""" % (anno, file))
script.close()
k = "%sR CMD BATCH script.r" % (binpath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class R_OTUplots(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_plots_otu")
self.start()
def performStep(self):
####OTUS
f = self.find("fasta")
tasks = list()
#script = open("%s/script.r" % (self.stepdir), "w")
#script.write("""source("%sOtuReadPlots.r")\n""" % (scriptspath))
for file in f:
if file.find("annotated.fasta")>0:
k = """grep ">" %s | awk '{FS = "|"; OFS="\t"} {print $4, $5}' > %s.otustats""" % (file, file)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir, debug=False)
tasks.append(task)
#script.write("""makeBatch("%s.otustats")\n""" % (file))
####COVERAGE
f = self.find("clcassemblystats")
#for file in f:
#script.write("""makeBatchCoverage("%s")\n""" % (file))
#script.close()
### make sure all conversions are complete
for task in tasks:
task.wait()
k = "%sR CMD BATCH %sOtuReadPlots.r" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class R_rarefactions(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("R_rarefactions")
self.start()
def performStep(self):
for k in "r_nseqs,rarefaction,r_simpson,r_invsimpson,r_chao,r_shannon,r_shannoneven,r_coverage".strip().split(","):
f = self.find(k)
k = "%sR CMD BATCH %srarefactions.R" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, dependson=list(), cwd = self.stepdir)
task.wait()
class AlignmentTrim(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AlignmentTrim")
self.start()
def performStep(self):
f = self.find("fasta")[0]
args = ""
for arg, val in self.arguments.items():
if val.startswith("find"):
val=self.find(val.split(":")[1])[0]
args = "%s -%s %s" % (args, arg, val)
k ="%spython %salignmentTrimmer.py %s -I %s" % (binpath, scriptspath, args, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
task.wait()
class AnnotateClusters(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("Annotate")
self.start()
def performStep(self):
l = self.find("list")
t = self.find("taxonomy")
f = self.find("fasta")
g = self.find("group")
self.message(l)
self.message(t)
self.message(f)
self.message(g)
if len(l)==0 or len(t)==0 or len(f)==0 or len(g) == 0:
self.failed=True
else:
tasks=list()
for fasta in f:
dist = fasta.split("_")[-2]
for tax in t:
if tax.find(dist)>-1 and tax.find("otu")==-1:
k = "%spython %sRetrieve.py %s %s %s %s %s" % (binpath, scriptspath, dist, l[0], tax, g[0], fasta)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
#################################################
## Functions
##
################################################
### Read in a file and return a list of lines
###
def loadLines(x):
try:
fp = open(x, "r")
cont=fp.readlines()
fp.close()
#print "%s line(s) loaded." % (len(cont))
except:
cont=""
#print "%s cannot be opened, does it exist? " % ( x )
return cont
def unlist(struct):
for x in struct:
if type(x) is tuple or type(x) is list or type(x) is set :
for y in unlist(x):
yield y
else:
yield x
def init(id, e, maxnodes = 250, update=0.1):
global __projectid__
global __email__
global __admin__
global BOH
global MOTHUR
global QS
__projectid__ = id
__email__ = e
__admin__ = 'sszpakow@gmail.com'
BOH = BufferedOutputHandler()
MOTHUR = MothurCommandInfo(path=mothurpath)
QS = TaskQueueStatus(update = update, maxnodes=maxnodes)
return (BOH)
def revComp(string):
global transtab
string=string.upper()
#reverse
string = string [::-1]
return string.translate(transtab)
#################################################
## Arguments
##
#################################################
## Begin
##
from string import maketrans
inttab= "ACGTN"
outtab = "TGCAN"
transtab = maketrans(inttab, outtab)
pool_open_files = BoundedSemaphore(value=4, verbose=False)
mothurpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/mothur-current/"
cdhitpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/cdhit-current/"
scriptspath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/scripts/"
binpath = "/usr/local/devel/ANNOTATION/sszpakow/YAP/bin/"
dotpath = "/usr/local/packages/graphviz/bin/"
defaulttemplate = "default.q"
#################################################
## Finish
#################################################
|
mit
| 3,567,784,365,733,237,000
| 26.780702
| 226
| 0.617004
| false
| 2.781152
| false
| false
| false
|
CuBoulder/atlas
|
atlas/backup_operations.py
|
1
|
1820
|
"""
atlas.backup_operations
~~~~
Commands that run on servers to create, restore, and remove backups.
Instance methods:
Delete - Local - Remove backup files.
"""
import logging
import os
from datetime import datetime
from time import time
from atlas import utilities
from atlas.config import (ENVIRONMENT, INSTANCE_ROOT, WEB_ROOT, CORE_WEB_ROOT_SYMLINKS,
NFS_MOUNT_FILES_DIR, NFS_MOUNT_LOCATION, SAML_AUTH,
SERVICE_ACCOUNT_USERNAME, SERVICE_ACCOUNT_PASSWORD, VARNISH_CONTROL_KEY,
SMTP_PASSWORD, WEBSERVER_USER_GROUP, ATLAS_LOCATION, SITE_DOWN_PATH,
SSH_USER, BACKUP_PATH)
from atlas.config_servers import (SERVERDEFS, ATLAS_LOGGING_URLS, API_URLS,
VARNISH_CONTROL_TERMINALS, BASE_URLS)
# Setup a sub-logger. See tasks.py for longer comment.
log = logging.getLogger('atlas.backup_operations')
def backup_delete(item):
"""Remove backup files from servers
Arguments:
        item {dict} -- Backup item to remove
"""
log.debug('Backup | Delete | Item - %s', item)
log.info('Backup | Delete | Item - %s ', item['_id'])
instance = utilities.get_single_eve('sites', item['site'], item['site_version'])
pretty_filename = '{0}_{1}'.format(
instance['sid'], item['backup_date'].strftime("%Y-%m-%d-%H-%M-%S"))
pretty_database_filename = '{0}.sql'.format(pretty_filename)
database_path = '{0}/backups/{1}'.format(BACKUP_PATH, pretty_database_filename)
pretty_files_filename = '{0}.tar.gz'.format(pretty_filename)
files_path = '{0}/backups/{1}'.format(BACKUP_PATH, pretty_files_filename)
os.remove(files_path)
os.remove(database_path)
log.info('Backup | Delete | Complete | Item - %s', item['_id'])
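# Illustrative call (the shape of the item dict is inferred from the lookups above;
# all field values are hypothetical):
#   backup_delete({'_id': 'abc123', 'site': 'site_id', 'site_version': 1,
#                  'backup_date': datetime(2018, 1, 1, 12, 0)})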
|
mit
| -1,822,666,200,972,725,500
| 36.916667
| 98
| 0.639011
| false
| 3.611111
| false
| false
| false
|
michaelrice/gotland
|
gotland/rabbit/api.py
|
1
|
18344
|
# Copyright 2014 Michael Rice <michael@michaelrice.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
try:
from urllib import quote, quote_plus
except ImportError:
from urllib.parse import quote, quote_plus
import requests
from requests.auth import HTTPBasicAuth
class Client(object):
def __init__(self, end_point="http://localhost:15672/api/",
username="guest", password="guest"):
"""Client connection info for the rabbitmq_management API
Usage::
            myapi = Client(username="sam", password="secure")
"""
self.end_point = end_point
self.auth = HTTPBasicAuth(username, password)
def _get_data(self, path, **kwargs):
"""Lots of work to do here. Literally doing the least possible
to just get something functional. Need to add error handling,
and raise proper exceptions"""
params = None
if 'params' in kwargs:
params = kwargs.get("params")
response = requests.get(path, auth=self.auth, params=params)
if response.status_code != 200:
return
return response.json()
def _send_data(self, path, data=None, request_type='PUT'):
data = json.dumps(data)
if data == 'null':
data = None
headers = {
"Content-type": "application/json",
"Accept": "application/json"
}
        if request_type == 'PUT':
response = requests.put(path, data, headers=headers, auth=self.auth)
        elif request_type == 'DELETE':
response = requests.delete(path, auth=self.auth, headers=headers,
data=data)
else:
response = requests.post(path, data=data, headers=headers,
auth=self.auth)
if response.status_code == 204:
return
return response.json()
def check_aliveness(self, vhost='/'):
"""Check aliveness of a given vhost. By default / will be checked.
Usage::
            myapi = Client()
if not myapi.check_aliveness():
handle_down_event()
"""
path = self.end_point + "aliveness-test/" + quote_plus(vhost)
data = self._get_data(path)
if data is None:
return False
try:
if data.get("status") != "ok":
return False
return True
except KeyError:
return False
def get_overview(self):
"""Various random bits of information that describe the
whole system."""
path = self.end_point + "overview"
data = self._get_data(path)
return data
def get_nodes(self):
"""A list of nodes in the RabbitMQ cluster."""
path = self.end_point + "nodes"
data = self._get_data(path)
return data
def get_node_info(self, node_name, get_memory=False):
"""An individual node in the RabbitMQ cluster. Add "get_memory=true"
to get memory statistics."""
path = self.end_point + "nodes/" + node_name
params = None
if get_memory:
params = {"memory": "true"}
data = self._get_data(path, params=params)
return data
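    # Illustrative sketch (not part of the original file): list the cluster's
    # nodes, then fetch per-node details including memory statistics. Assumes a
    # reachable broker; the 'name' key is assumed to follow the management
    # API's node listing format.
    #
    #   client = Client(end_point="http://localhost:15672/api/",
    #                   username="guest", password="guest")
    #   for node in client.get_nodes() or []:
    #       info = client.get_node_info(node["name"], get_memory=True)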
def get_extensions(self):
"""A list of extensions to the management plugin"""
path = self.end_point + "extensions"
data = self._get_data(path)
return data
def get_connections(self):
"""A list of all open connections."""
path = self.end_point + "connections"
data = self._get_data(path)
return data
def get_connections_name(self, name):
"""Gets info for an individual connection"""
name = quote(name)
path = self.end_point + "connections/{0}".format(name)
data = self._get_data(path)
return data
def get_channels(self):
"""List of all channels"""
path = self.end_point + "channels"
data = self._get_data(path)
return data
def get_channels_name(self, channel=None):
"""Info about a specific channel"""
channel = quote(channel)
path = self.end_point + "channels/{0}".format(channel)
data = self._get_data(path)
return data
def get_exchanges(self):
"""List of all exchanges"""
path = self.end_point + "exchanges"
data = self._get_data(path)
return data
def get_exchanges_vhost(self, vhost='/'):
"""List of all exchanges on a given vhost"""
path = self.end_point + "exchanges/{0}".format(quote_plus(vhost))
data = self._get_data(path)
return data
def get_exchanges_name_vhost(self, vhost='/', exchange_name=None):
"""Gets info about a given echange (name) on a given vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
return self._get_data(path)
def get_bindings_for_exchange(self, vhost='/', exchange_name=None,
stype="source"):
"""A list of all bindings in which a given exchange is the source."""
path = self.end_point + "exchanges/{0}/{1}/bindings/{2}"
path = path.format(quote_plus(vhost), exchange_name, stype)
return self._get_data(path)
def get_queues(self):
"""A list of all queues on the server"""
path = self.end_point + "queues"
return self._get_data(path)
def get_queues_by_vhost(self, vhost='/'):
"""A list of all queues in a given virtual host."""
path = self.end_point + "queues/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_queue_by_name(self, queue_name=None, vhost='/'):
"""Inforation about an individual queue. Takes optional vhost param
Checks / as the default vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
return self._get_data(path)
def get_bindings_by_queue(self, queue_name=None, vhost='/'):
"""A list of all bindings on a given queue. Takes an optional
vhost param. The default vhost is /"""
path = self.end_point + "queues/{0}/{1}/bindings"
path = path.format(quote_plus(vhost), queue_name)
return self._get_data(path)
def get_bindings(self):
"""A list of all bindings."""
path = self.end_point + "bindings"
return self._get_data(path)
def get_bindings_by_vhost(self, vhost='/'):
"""A list of all bindings in a given virtual host."""
path = self.end_point + "bindings/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_bindings_between_exchange_and_queue(self, queue_name=None,
exchange_name=None, vhost='/'):
"""A list of all bindings between an exchange and a queue.
Remember, an exchange and a queue can be bound together many times!
"""
path = self.end_point + "bindings/{0}/e/{1}/q/{2}"
path = path.format(quote_plus(vhost), exchange_name, queue_name)
return self._get_data(path)
def update_bindings_between_exchange_and_queue(self):
"""A list of all bindings between an exchange and a queue.
Remember, an exchange and a queue can be bound together many times!
To create a new binding, POST to this URI. You will need a body looking
something like this:
{"routing_key":"my_routing_key","arguments":[]}
All keys are optional. The response will contain a Location header
telling you the URI of your new binding."""
pass
def get_binding_between_exchange_and_queue(self, queue_name=None,
exchange_name=None, vhost='/'):
"""
An individual binding between an exchange and a queue.
The props part of the URI is a "name" for the binding composed of
its routing key and a hash of its arguments.
"""
path = self.end_point + "bindings/{0}/e/{1}/q/{2}/props"
path = path.format(quote_plus(vhost), exchange_name, queue_name)
return self._get_data(path)
def get_bindings_between_exchanges(self, exchange_name_s=None,
exchange_name_d=None, stype="destination", vhost='/'):
"""A list of all bindings between two exchanges. Similar to the list
of all bindings between an exchange and a queue, above.
stype can be either "destination" or "props"
"""
path = self.end_point + "bindings/{0}/e/{1}/e/{2}/{3}"
vhost = quote_plus(vhost)
path = path.format(vhost, exchange_name_s, exchange_name_d, stype)
return self._get_data(path)
def get_vhosts(self):
"""Return a list of all vhosts"""
path = self.end_point + "vhosts"
return self._get_data(path)
def get_vhost_by_name(self, vhost='/'):
"""An individual virtual host. As a virtual host only has a name,
you do not need an HTTP body when PUTing one of these.
"""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_premissions_by_vhost(self, vhost='/'):
"""A list of all permissions for a given virtual host."""
vhost = quote_plus(vhost)
path = self.end_point + "vhosts/{0}/permissions".format(vhost)
return self._get_data(path)
def get_users(self):
"""A list of all users"""
path = self.end_point + "users"
return self._get_data(path)
def get_user_by_name(self, username="guest"):
"""Info about an individual user"""
path = self.end_point + "users/{0}".format(username)
return self._get_data(path)
def get_user_permissions(self, username="guest"):
"""A list of all permissions for a given user."""
path = self.end_point + "users/{0}/permissions".format(username)
return self._get_data(path)
def whoami(self):
"""Details of the currently authenticated user."""
path = self.end_point + "whoami"
return self._get_data(path)
def get_permissions(self):
"""A list of all permissions for all users."""
path = self.end_point + "permissions"
return self._get_data(path)
def get_user_permissions_by_vhost(self, username="guest", vhost='/'):
"""An individual permission of a user and virtual host."""
vhost = quote_plus(vhost)
path = self.end_point + "permissions/{0}/{1}".format(vhost, username)
return self._get_data(path)
def get_parameters(self):
"""A list of all parameters."""
path = self.end_point + "parameters"
return self._get_data(path)
def get_parameters_by_component(self, component=None):
"""A list of all parameters for a given component."""
path = self.end_point + "parameters/{0}".format(component)
return self._get_data(path)
def get_parameters_by_component_by_vhost(self, component=None,
vhost='/'):
"""A list of all parameters for a given component and virtual host"""
vhost = quote_plus(vhost)
path = self.end_point + "parameters/{1}/{0}".format(vhost, component)
return self._get_data(path)
def get_parameter_for_vhost_by_component_name(self, component=None,
parameter_name=None, vhost='/'):
"""Get an individual parameter value from a given vhost & component"""
path = self.end_point + "parameters/{1}/{0}/{2}"
path = path.format(quote_plus(vhost), component, parameter_name)
return self._get_data(path)
def get_policies(self):
"""A list of all policies"""
path = self.end_point + "policies"
return self._get_data(path)
def get_policies_by_vhost(self, vhost='/'):
"""A list of all policies in a given virtual host."""
path = self.end_point + "policies/{0}".format(quote_plus(vhost))
return self._get_data(path)
def get_policy_for_vhost_by_name(self, name=None, vhost='/'):
"""Information about an individual policy"""
vhost = quote_plus(vhost)
path = self.end_point + "policies/{0}/{1}".format(vhost, name)
return self._get_data(path)
def create_exchange_on_vhost(self, exchange_name=None,
body={}, vhost='/'):
"""An individual exchange. To PUT an exchange, you will need a body
looking something like this:
{
"type":"direct",
"auto_delete":false,
"durable":true,
"internal":false,
"name": "mytest",
"arguments":[]
}
"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
return self._send_data(path, data=body)
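    # Illustrative sketch (not part of the original file): declare a durable
    # direct exchange on the default vhost using the body format documented
    # above. The exchange name is a hypothetical placeholder and a reachable
    # broker is assumed.
    #
    #   client = Client(username="guest", password="guest")
    #   client.create_exchange_on_vhost(
    #       exchange_name="mytest",
    #       body={"type": "direct", "durable": True, "auto_delete": False},
    #       vhost="/",
    #   )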
def create_queue_on_vhost(self, queue_name=None, body={}, vhost='/'):
"""An individual queue. To PUT a queue, you will need a body looking
something like this:
{
"auto_delete":false,
"durable":true,
"arguments":[],
"node":"rabbit@localnode-1"
}
"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
return self._send_data(path, data=body)
def create_vhost(self, vhost):
"""An individual virtual host. As a virtual host only has a name,
you do not need an HTTP body when PUTing one of these."""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
return self._send_data(path)
def create_user(self, username, body={}):
"""An individual user. To PUT a user, you will need a body looking
something like this:
{
"password":"secret",
"tags":"administrator"
}
or:
{
"password_hash":"2lmoth8l4H0DViLaK9Fxi6l9ds8=",
"tags":"administrator"
}
The tags key is mandatory. Either password or password_hash must be
set. Setting password_hash to "" will ensure the user cannot use a
password to log in. tags is a comma-separated list of tags for the
user. Currently recognised tags are "administrator", "monitoring"
and "management".
"""
path = self.end_point + "users/{0}".format(username)
return self._send_data(path, data=body)
def grant_permissions_on_vhost(self, body={}, username=None,
vhost='/'):
"""An individual permission of a user and virtual host. To PUT a
permission, you will need a body looking something like this:
{
"configure":".*",
"write":".*",
"read":".*"
}
All keys are mandatory.
"""
vhost = quote_plus(vhost)
path = self.end_point + "permissions/{0}/{1}".format(vhost, username)
return self._send_data(path, data=body)
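    # Illustrative sketch (not part of the original file): provision a new user
    # and grant it full permissions on the default vhost, using the body
    # formats documented in create_user() and grant_permissions_on_vhost().
    # The username, password, and tags are hypothetical placeholders.
    #
    #   client = Client(username="guest", password="guest")
    #   client.create_user("sam", body={"password": "secure", "tags": "management"})
    #   client.grant_permissions_on_vhost(
    #       body={"configure": ".*", "write": ".*", "read": ".*"},
    #       username="sam",
    #       vhost="/",
    #   )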
def update_parameter(self, component=None, body={}, parameter_name=None,
vhost='/'):
"""An individual parameter. To PUT a parameter, you will need a body
looking something like this:
{
"vhost": "/",
"component":"federation",
"name":"local_username",
"value":"guest"
}
"""
vhost = quote_plus(vhost)
path = "parameters/{1}/{0}/{2}".format(vhost, component, parameter_name)
return self._send_data(path, data=body)
def update_policies(self, policy_name=None, body={}, vhost='/'):
"""An individual policy. To PUT a policy, you will need a body
looking something like this:
{
"pattern":"^amq.",
"definition": {
"federation-upstream-set":"all"
},
"priority":0
}
policies/vhost/name
"""
vhost = quote_plus(vhost)
path = self.end_point + "policies/{0}/{1}".format(vhost, policy_name)
return self._send_data(path, data=body)
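    # Illustrative sketch (not part of the original file): define a policy that
    # applies a federation upstream set to all "amq."-prefixed names, mirroring
    # the example body in the docstring above. The policy name is a
    # hypothetical placeholder.
    #
    #   client = Client(username="guest", password="guest")
    #   client.update_policies(
    #       policy_name="federate-amq",
    #       body={"pattern": "^amq.",
    #             "definition": {"federation-upstream-set": "all"},
    #             "priority": 0},
    #       vhost="/",
    #   )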
def delete_connection(self, name=None, reason=None):
"""Removes a connection by name, with an optional reason"""
path = self.end_point + "connections/" + name
self._send_data(path, request_type='DELETE')
def delete_exchange(self, exchange_name=None, vhost='/'):
"""Delete an exchange from a vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "exchanges/{0}/{1}".format(vhost, exchange_name)
self._send_data(path, request_type='DELETE')
def delete_queue(self, queue_name=None, vhost='/'):
"""Delete a queue from a vhost"""
vhost = quote_plus(vhost)
path = self.end_point + "queues/{0}/{1}".format(vhost, queue_name)
self._send_data(path, request_type='DELETE')
def delete_contents_from_queue(self, queue_name=None, vhost='/'):
"""Delete the contents of a queue. If no vhost name is given the
        default / will be used"""
path = self.end_point + "queues/{0}/{1}/contents"
path = path.format(quote_plus(vhost), queue_name)
self._send_data(path, request_type='DELETE')
#def delete_thing(self):
# """An individual binding between an exchange and a queue. The props
# part of the URI is a "name" for the binding composed of its routing
# key and a hash of its arguments."""
def delete_vhost(self, vhost):
"""Delete a given vhost"""
path = self.end_point + "vhosts/{0}".format(quote_plus(vhost))
self._send_data(path, request_type='DELETE')
def delete_user(self, user=None):
"""Delete a given user"""
path = self.end_point + "users/{0}".format(user)
self._send_data(path, request_type='DELETE')
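# Minimal end-to-end sketch (added for illustration, not part of the original
# module). It assumes a RabbitMQ broker with the management plugin reachable at
# the default end point and default guest credentials; the response keys used
# below are assumed to follow the management API's JSON format. The __main__
# guard keeps the module import-safe.
if __name__ == "__main__":
    client = Client(end_point="http://localhost:15672/api/",
                    username="guest", password="guest")
    if not client.check_aliveness():
        raise SystemExit("broker is not responding on vhost /")
    overview = client.get_overview()
    print("RabbitMQ", overview.get("rabbitmq_version") if overview else "unknown")
    # Inventory of the default vhost.
    for queue in client.get_queues_by_vhost("/") or []:
        print("queue:", queue.get("name"))
    for exchange in client.get_exchanges_vhost("/") or []:
        print("exchange:", exchange.get("name"))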
|
apache-2.0
| -7,473,669,924,650,975,000
| 37.216667
| 80
| 0.590656
| false
| 3.911301
| false
| false
| false
|
clchiou/garage
|
py/g1/http/http2_servers/g1/http/http2_servers/parts.py
|
1
|
1361
|
import g1.networks.servers.parts
from g1.apps import parameters
from g1.apps import utils
from g1.bases import labels
from .. import http2_servers # pylint: disable=relative-beyond-top-level
from . import nghttp2
SERVER_LABEL_NAMES = (
# Input.
'application',
# Private.
('server', g1.networks.servers.parts.SERVER_LABEL_NAMES),
)
def define_server(module_path=None, **kwargs):
module_path = module_path or http2_servers.__name__
module_labels = labels.make_nested_labels(module_path, SERVER_LABEL_NAMES)
setup_server(
module_labels,
parameters.define(module_path, make_server_params(**kwargs)),
)
return module_labels
def setup_server(module_labels, module_params):
g1.networks.servers.parts.setup_server(module_labels.server, module_params)
utils.define_maker(
# Although this is called a server, from the perspective of
# g1.networks.servers.SocketServer, this is a handler.
http2_servers.HttpServer,
{
'server_socket': module_labels.server.socket,
'application': module_labels.application,
'return': module_labels.server.handler,
},
)
def make_server_params(**kwargs):
kwargs.setdefault('protocols', (nghttp2.NGHTTP2_PROTO_VERSION_ID, ))
return g1.networks.servers.parts.make_server_params(**kwargs)
|
mit
| 8,790,086,334,072,161,000
| 30.651163
| 79
| 0.686995
| false
| 3.591029
| false
| false
| false
|