| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class: Python) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import cv2
import numpy as np
import sys, time, math
data = []
for j in range(1,6):
for i in range(1,12):
        filename = 'calibration/' + str(12*i) + 'x' + str(16*j) + '.png'
        img = cv2.imread(filename, 1)
        if img is None:
            print "cannot open ", filename
        else:
            #img[:,:,1] = 0
            gimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            circles = cv2.HoughCircles(gimg, cv2.cv.CV_HOUGH_GRADIENT, 1,
                minDist=1000, param1=250, param2=7, minRadius=12, maxRadius=18)
            # round circle parameters before drawing; cv2.circle expects ints
            circles = np.uint16(np.around(circles))
            for k in circles[0, :]:
                # draw the outer circle
                cv2.circle(img, (k[0], k[1]), k[2], (0, 255, 0), 2)
                # draw the center of the circle
                cv2.circle(img, (k[0], k[1]), 2, (0, 0, 255), 3)
            # keep the first detected circle: [x, y flipped (image apparently
            # 472 px tall), nominal grid x, nominal grid y]
            circles = circles[0][0]
            data.append([circles[0], 472 - circles[1], 12*i, 16*j])
            cv2.imshow('detected', img)
            cv2.waitKey(0)
            cv2.imwrite('detected/' + str(12*i) + 'x' + str(16*j) + '.png', img)
myFile = open('calibrationData.txt', 'w')
for x in data:
    myFile.write(str(x) + '\n')
myFile.close()
cv2.destroyAllWindows()
| skyleradams/tim-howard | Vision/calibrationCodeAndImages/vis.py | Python | mit | 976 |
import csv
import sys
#usage
# python parseResults.py results.txt
fname = sys.argv[1] if len(sys.argv) > 1 else '../results/nullutterance_results.txt'
file_names = [fname]
itemfile = open("items.txt")
items = [" ".join(l.rstrip().split()) for l in itemfile.readlines()]
itemfile.close()
#print items
lines = []
results = []
wresults = []
files = [open(fn) for fn in file_names]
for f in files:
lines.extend([l.rstrip() for l in f.readlines()])
#print lines
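# Input format, as inferred from the parsing loop below: the results file is a
# sequence of blocks of the form
#   alternatives
#   <comma-separated alternative set>
#   <priors line (skipped)>
#   qud,<value>
#   speaker-opt,<value>
#   <states>,,<probabilities>   (two comma-separated lists joined by ",,")
# optionally followed by extra "speaker-opt,..." or "qud,..." blocks that
# reuse the current alternatives.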
def getReducedAlternatives(alts):
basic = ""
lownum = ""
highnum = ""
extra = ""
twowords = ""
threewords = ""
if "some,all,none" in alts:
basic = "0_basic"
if "one,two,three" in alts:
lownum = "1_lownum"
if "eleven" in alts:
highnum = "3_highnum"
if "many" in alts:
extra = "2_extra"
if "almostall" in alts:
twowords = "4_twowords"
if "lessthanhalf" in alts:
threewords = "5_threewords"
return "".join([basic,lownum,extra,highnum,twowords,threewords])
headers = ["Item","QUD","State","Alternatives","SpeakerOptimality","PosteriorProbability"]
k = 0
mcnt = 0
condcnt = 0
priorcnt = -1
while k < len(lines):
if lines[k] == "alternatives":
if priorcnt < 89:
priorcnt = priorcnt+1
else:
priorcnt = 0
# mcnt = mcnt + 1
k = k + 1
alts = getReducedAlternatives(lines[k])
k = k + 1
# priors = lines[k].split(",")
k = k + 1
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
# print pairs
# print k
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
# print priorcnt
# print len(items)
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
elif lines[k].startswith("speaker-opt"):
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
#print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
elif lines[k].startswith("qud"):
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
#print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, prob[j]])
k = k + 1
    else:
        #print lines[k]
        print "this shouldn't be happening"
        print priorcnt
        print items[priorcnt]
        k = k + 1  # without this, an unrecognized line would loop forever
#print results
for r in results:
inner_dict = dict(zip(headers,r))
wresults.append(inner_dict)
oname = '../results/data/parsed_nullutterance_results.tsv'
w = csv.DictWriter(open(oname, 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
w.writeheader()
w.writerows(wresults)
| thegricean/sinking-marbles | models/complex_prior/smoothed_unbinned15/scripts/parseNullUtteranceResults.py | Python | mit | 2,789 |
# coding: utf-8
import pytest
from statemachine import StateMachine, State
from statemachine import exceptions
class MyModel(object):
"A class that can be used to hold arbitrary key/value pairs as attributes."
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self.__dict__)
def test_machine_repr(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert repr(machine) == "CampaignMachine(model=MyModel({'state': 'draft'}), " \
"state_field='state', current_state='draft')"
def test_machine_should_be_at_start_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert [s.value for s in campaign_machine.states] == ['closed', 'draft', 'producing']
assert [t.identifier for t in campaign_machine.transitions] == ['add_job', 'deliver', 'produce'] # noqa: E501
assert model.state == 'draft'
assert machine.current_state == machine.draft
def test_machine_should_only_allow_only_one_initial_state():
class CampaignMachine(StateMachine):
"A workflow machine"
draft = State('Draft', initial=True)
producing = State('Being produced')
closed = State('Closed', initial=True) # Should raise an Exception when instantiated
add_job = draft.to(draft) | producing.to(producing)
produce = draft.to(producing)
deliver = producing.to(closed)
with pytest.raises(exceptions.InvalidDefinition):
model = MyModel()
CampaignMachine(model)
def test_should_change_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == 'draft'
assert machine.current_state == machine.draft
machine.produce()
assert model.state == 'producing'
assert machine.current_state == machine.producing
def test_should_run_a_transition_that_keeps_the_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == 'draft'
assert machine.current_state == machine.draft
machine.add_job()
assert model.state == 'draft'
assert machine.current_state == machine.draft
machine.produce()
assert model.state == 'producing'
assert machine.current_state == machine.producing
machine.add_job()
assert model.state == 'producing'
assert machine.current_state == machine.producing
def test_should_change_state_with_multiple_machine_instances(campaign_machine):
model1 = MyModel()
model2 = MyModel()
machine1 = campaign_machine(model1)
machine2 = campaign_machine(model2)
assert machine1.current_state == campaign_machine.draft
assert machine2.current_state == campaign_machine.draft
p1 = machine1.produce
p2 = machine2.produce
p2()
assert machine1.current_state == campaign_machine.draft
assert machine2.current_state == campaign_machine.producing
p1()
assert machine1.current_state == campaign_machine.producing
assert machine2.current_state == campaign_machine.producing
@pytest.mark.parametrize('current_state, transition', [
('draft', 'deliver'),
('closed', 'add_job'),
])
def test_call_to_transition_that_is_not_in_the_current_state_should_raise_exception(
campaign_machine, current_state, transition):
model = MyModel(state=current_state)
machine = campaign_machine(model)
assert machine.current_state.value == current_state
with pytest.raises(exceptions.TransitionNotAllowed):
machine.run(transition)
def test_machine_should_list_allowed_transitions_in_the_current_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == 'draft'
assert [t.identifier for t in machine.allowed_transitions] == ['add_job', 'produce']
machine.produce()
assert model.state == 'producing'
assert [t.identifier for t in machine.allowed_transitions] == ['add_job', 'deliver']
deliver = machine.allowed_transitions[1]
deliver()
assert model.state == 'closed'
assert machine.allowed_transitions == []
def test_machine_should_run_a_transition_by_his_key(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == 'draft'
machine.run('add_job')
assert model.state == 'draft'
assert machine.current_state == machine.draft
machine.run('produce')
assert model.state == 'producing'
assert machine.current_state == machine.producing
def test_machine_should_raise_an_exception_if_a_transition_by_his_key_is_not_found(
campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == 'draft'
with pytest.raises(exceptions.InvalidTransitionIdentifier):
machine.run('go_horse')
def test_machine_should_use_and_model_attr_other_than_state(campaign_machine):
model = MyModel(status='producing')
machine = campaign_machine(model, state_field='status')
assert getattr(model, 'state', None) is None
assert model.status == 'producing'
assert machine.current_state == machine.producing
machine.deliver()
assert model.status == 'closed'
assert machine.current_state == machine.closed
def test_should_allow_validate_data_for_transition(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
def custom_validator(*args, **kwargs):
if 'weapon' not in kwargs:
raise LookupError('Weapon not found.')
campaign_machine.produce.validators = [custom_validator]
with pytest.raises(LookupError):
machine.produce()
machine.produce(weapon='sword')
assert model.state == 'producing'
def test_should_allow_plug_an_event_on_running_a_transition(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
def double(self, *args, **kwargs):
return kwargs.get('value', 0) * 2
campaign_machine.on_add_job = double
assert machine.add_job() == 0
assert machine.add_job(value=2) == 4
def test_should_check_if_is_in_status(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert machine.is_draft
assert not machine.is_producing
assert not machine.is_closed
machine.produce()
assert not machine.is_draft
assert machine.is_producing
assert not machine.is_closed
machine.deliver()
assert not machine.is_draft
assert not machine.is_producing
assert machine.is_closed
def test_defined_value_must_be_assigned_to_models(campaign_machine_with_values):
model = MyModel()
machine = campaign_machine_with_values(model)
assert model.state == 1
machine.produce()
assert model.state == 2
machine.deliver()
assert model.state == 3
def test_state_machine_without_model(campaign_machine):
machine = campaign_machine()
assert machine.is_draft
assert not machine.is_producing
assert not machine.is_closed
machine.produce()
assert not machine.is_draft
assert machine.is_producing
assert not machine.is_closed
@pytest.mark.parametrize('model, machine_name, start_value', [
(None, 'campaign_machine', 'producing'),
(None, 'campaign_machine_with_values', 2),
(MyModel(), 'campaign_machine', 'producing'),
(MyModel(), 'campaign_machine_with_values', 2),
])
def test_state_machine_with_a_start_value(request, model, machine_name, start_value):
machine_cls = request.getfixturevalue(machine_name)
machine = machine_cls(model, start_value=start_value)
assert not machine.is_draft
assert machine.is_producing
assert not model or model.state == start_value
@pytest.mark.parametrize('model, machine_name, start_value', [
(None, 'campaign_machine', 'tapioca'),
(None, 'campaign_machine_with_values', 99),
(MyModel(), 'campaign_machine', 'tapioca'),
(MyModel(), 'campaign_machine_with_values', 99),
])
def test_state_machine_with_a_invalid_start_value(request, model, machine_name, start_value):
machine_cls = request.getfixturevalue(machine_name)
with pytest.raises(exceptions.InvalidStateValue):
machine_cls(model, start_value=start_value)
def test_should_not_create_instance_of_machine_without_states():
class EmptyMachine(StateMachine):
"An empty machine"
pass
with pytest.raises(exceptions.InvalidDefinition):
EmptyMachine()
def test_should_not_create_instance_of_machine_without_transitions():
class NoTransitionsMachine(StateMachine):
"A machine without transitions"
initial = State('initial')
with pytest.raises(exceptions.InvalidDefinition):
NoTransitionsMachine()
def test_perfectly_fine_machine_should_be_connected(traffic_light_machine):
model = MyModel()
machine = traffic_light_machine(model)
initial_state = [s for s in traffic_light_machine.states if s.initial][0]
disconnected_states = machine._disconnected_states(initial_state)
assert len(disconnected_states) == 0
def test_should_not_create_disconnected_machine():
class BrokenTrafficLightMachine(StateMachine):
"A broken traffic light machine"
green = State('Green', initial=True)
yellow = State('Yellow')
blue = State('Blue') # This state is unreachable
cycle = green.to(yellow) | yellow.to(green)
with pytest.raises(exceptions.InvalidDefinition) as e:
BrokenTrafficLightMachine()
    assert 'Blue' in str(e.value)
    assert 'Green' not in str(e.value)
def test_should_not_create_big_disconnected_machine():
class BrokenTrafficLightMachine(StateMachine):
"A broken traffic light machine"
green = State('Green', initial=True)
yellow = State('Yellow')
magenta = State('Magenta') # This state is unreachable
red = State('Red')
cyan = State('Cyan')
blue = State('Blue') # This state is also unreachable
cycle = green.to(yellow)
diverge = green.to(cyan) | cyan.to(red)
validate = yellow.to(green)
with pytest.raises(exceptions.InvalidDefinition) as e:
BrokenTrafficLightMachine()
    assert 'Magenta' in str(e.value)
    assert 'Blue' in str(e.value)
    assert 'Cyan' not in str(e.value)
def test_state_value_is_correct():
STATE_NEW = 0
STATE_DRAFT = 1
class ValueTestModel(StateMachine):
new = State(STATE_NEW, value=STATE_NEW, initial=True)
draft = State(STATE_DRAFT, value=STATE_DRAFT)
write = new.to(draft)
model = ValueTestModel()
assert model.new.value == STATE_NEW
assert model.draft.value == STATE_DRAFT
| fgmacedo/python-statemachine | tests/test_statemachine.py | Python | mit | 10,767 |
# -*- coding: utf-8 -*-
#
# Objective-Revision-Evaluation-Service documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 23 09:13:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Objective-Revision-Evaluation-Service'
copyright = u'2015, halfak, he7d3r, ladsgroup'
author = u'halfak, he7d3r, ladsgroup'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Objective-Revision-Evaluation-Servicedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Objective-Revision-Evaluation-Service.tex', u'Objective-Revision-Evaluation-Service Documentation',
u'halfak, he7d3r, ladsgroup', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'objective-revision-evaluation-service', u'Objective-Revision-Evaluation-Service Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Objective-Revision-Evaluation-Service', u'Objective-Revision-Evaluation-Service Documentation',
author, 'Objective-Revision-Evaluation-Service', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| he7d3r/ores | docs/conf.py | Python | mit | 9,556 |
from django.shortcuts import render
from django.contrib.auth.models import User
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from api.serializers import UserSerializer, ArticleSerializer, ArticleDetailSerializer
from news.models import Article
# Create your views here.
class UserList(ListCreateAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class BrandDetail(RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class NewsList(ListCreateAPIView):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
class ArticleDetail(ListAPIView):
queryset = Article.objects.all()
serializer_class = ArticleDetailSerializer
| munisisazade/developer_portal | api/views.py | Python | mit | 839 |
import json
from django.views import generic
from django.utils.decorators import method_decorator
from django.http import HttpResponse
from ..models import User, Join, Event, Tag
from .decorators import login_decorator
from .utils import get_user_notifications
@method_decorator(login_decorator, name='get')
class ProfileView(generic.DetailView):
template_name = 'core/pages/profile.html'
model = User
def get_context_data(self, **kwargs):
user = User.objects.filter(pk=self.kwargs['pk'])[0]
joined_events_id = list(Join.objects.filter(user=user).values_list('event', flat=True))
joined_events = list(Event.objects.filter(id__in=joined_events_id).exclude(event_owner=user))
owned_events = list(Event.objects.filter(event_owner=user))
tags = Tag.objects.order_by('name')
interests = user.interest_tags.all()
context = {'user': user,
'joined_events': joined_events,
'owned_events': owned_events,
'tags': tags,
'interests': interests,
'notifications': get_user_notifications(self.request.user),
'same_user': user == self.request.user}
return context
def post(self, request, *args, **kwargs):
data = {}
user = User.objects.filter(pk=self.kwargs['pk'])[0]
if user != request.user:
data['result'] = False
else:
tag_ids = map(int, request.POST.getlist('selectedTags[]'))
tags = Tag.objects.filter(pk__in=tag_ids)
user.interest_tags = tags
data['result'] = True
return HttpResponse(json.dumps(data))
| LorenzSelv/pinned | core/views/profile_view.py | Python | mit | 1,744 |
#!/usr/bin/env python3
# Author: Severin Kaderli <severin.kaderli@gmail.com>
#
# Project Euler - Problem 12:
# What is the value of the first triangle number to have over
# five hundred divisors?
import math
def get_triangle_number_with_n_factors(n):
    """Return the value of the first triangle number with more than n factors."""
    for i in range(1, 1000000):
        triangle_number = get_triangle_number(i)
        number_of_factors = get_number_of_factors(triangle_number)
        if number_of_factors > n:
            return triangle_number
def get_triangle_number(n):
return ((n + 1) * n) // 2
def get_number_of_factors(n):
    number_of_factors = 0
    for i in range(1, int(math.sqrt(n)) + 1):
        if n % i == 0:
            number_of_factors += 2
    # A perfect square's root is counted twice above; correct for that.
    if int(math.sqrt(n)) ** 2 == n:
        number_of_factors -= 1
    return number_of_factors
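# Quick sanity check (added for illustration), taken from the Project Euler
# problem statement: 28 = T(7) is the first triangle number with over five
# divisors (1, 2, 4, 7, 14, 28).
assert get_triangle_number(7) == 28
assert get_number_of_factors(28) == 6
assert get_triangle_number_with_n_factors(5) == 28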
if __name__ == "__main__":
print(get_triangle_number_with_n_factors(500))
| severinkaderli/Project-Euler-Python | 012.py | Python | mit | 818 |
"""Some standard gradient-based stochastic optimizers.
These are just standard routines that don't make any use of autograd,
though you could take gradients of these functions too if you want
to do meta-optimization."""
from __future__ import absolute_import
import autograd.numpy as np
from builtins import range
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
"""Stochastic gradient descent with momentum.
grad() must have signature grad(x, i), where i is the iteration number."""
velocity = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g)
velocity = mass * velocity - (1.0 - mass) * g
x += step_size * velocity
return x
def rmsprop(grad, x, callback=None, num_iters=100, step_size=0.1, gamma=0.9, eps = 10**-8):
"""Root mean squared prop: See Adagrad paper for details."""
avg_sq_grad = np.ones(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g)
avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
x -= step_size * g/(np.sqrt(avg_sq_grad) + eps)
return x
def adam(grad, x, callback=None, num_iters=100,
step_size=0.001, b1=0.9, b2=0.999, eps=10**-8):
"""Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
It's basically RMSprop with momentum and some correction terms."""
m = np.zeros(len(x))
v = np.zeros(len(x))
for i in range(num_iters):
g = grad(x, i)
if callback: callback(x, i, g)
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * (g**2) + b2 * v # Second moment estimate.
mhat = m / (1 - b1**(i + 1)) # Bias correction.
vhat = v / (1 - b2**(i + 1))
x -= step_size*mhat/(np.sqrt(vhat) + eps)
return x
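# Illustrative usage (a sketch, not part of the original module): minimize the
# toy quadratic f(x) = ||x - 3||^2 with adam. The gradient closure ignores the
# iteration index i, which would normally select a minibatch.
if __name__ == '__main__':
    toy_grad = lambda x, i: 2.0 * (x - 3.0)
    x_opt = adam(toy_grad, np.zeros(5), num_iters=300, step_size=0.1)
    print(x_opt)  # expected to be close to [3. 3. 3. 3. 3.]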
| barak/autograd | examples/optimizers.py | Python | mit | 1,847 |
import numpy as np
import matplotlib.pyplot as plt
import sys
import pickle
import os
from pprint import pprint
from scipy.optimize import curve_fit
import math
from datetime import datetime
from matplotlib import dates
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
file = open('MMD_and_coating_data.binpickl', 'rb')  # binary pickle: read in binary mode
data = pickle.load(file)
file.close()
def lognorm(x_vals, A, w, xc):
    """Lognormal pdf scaled by area A, with log-space width w and median xc."""
    return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
i=70
bins = []
while i <= 220:
i+=0.1
bins.append(i)
new_data = []
for row in data:
    row_datetime = row[0]  # avoid shadowing the datetime class imported above
cluster_no = row[1]
DpDc = row[2]
mass_distr = row[3]
mass_bins = np.array([row[0] for row in mass_distr])
masses = [row[1] for row in mass_distr]
temp = []
for mass in masses:
norm_mass = mass/np.max(masses)
temp.append(norm_mass)
masses_max1 = np.array(temp)
A=np.nan
w=np.nan
xc=np.nan
try:
popt, pcov = curve_fit(lognorm, mass_bins, masses_max1)
A=popt[0]
w=popt[1]
xc=popt[2]
fit_y_vals = []
for bin in bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_y_vals.append(fit_val)
MMD = bins[np.argmax(fit_y_vals)]
    except (RuntimeError, ValueError):  # curve_fit could not fit a lognormal
        MMD = np.nan
#print MMD, xc, math.exp(math.log(xc)-(w**2)/2)
##plotting
#fig = plt.figure()
#
#ax1 = fig.add_subplot(111)
#ax1.scatter(mass_bins, masses_max1)
#ax1.plot(bins, fit_y_vals)
#plt.xlabel('VED (nm)')
#plt.ylabel('dM/dlog(VED)')
#ax1.set_xscale('log')
#
#plt.show()
    new_data.append([row_datetime, cluster_no, DpDc, MMD])
GBPS = []
Cont = []
SPac = []
NPac = []
LRT = []
for line in new_data:
    row_datetime = line[0]  # again, avoid shadowing the datetime class
cluster_no = line[1]
DpDc= line[2]
MMD = line[3]
if MMD >= 220 or MMD <=75:
MMD = np.nan
    newline = [row_datetime, cluster_no, DpDc, MMD]
if cluster_no == 9:
GBPS.append(newline)
if cluster_no == 4:
Cont.append(newline)
if cluster_no in [6,8]:
SPac.append(newline)
if cluster_no in [2,7]:
LRT.append(newline)
if cluster_no in [1,3,5,10]:
NPac.append(newline)
GBPS_datetimes = [dates.date2num(row[0]) for row in GBPS]
GBPS_DpDc = [row[2] for row in GBPS]
GBPS_MMD = [row[3] for row in GBPS]
Cont_datetimes = [dates.date2num(row[0]) for row in Cont]
Cont_DpDc = [row[2] for row in Cont]
Cont_MMD = [row[3] for row in Cont]
NPac_datetimes = [dates.date2num(row[0]) for row in NPac]
NPac_DpDc = [row[2] for row in NPac]
NPac_MMD = [row[3] for row in NPac]
SPac_datetimes = [dates.date2num(row[0]) for row in SPac]
SPac_DpDc = [row[2] for row in SPac]
SPac_MMD = [row[3] for row in SPac]
LRT_datetimes = [dates.date2num(row[0]) for row in LRT]
LRT_DpDc = [row[2] for row in LRT]
LRT_MMD = [row[3] for row in LRT]
fire_span1_10s=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
fire_span1_10f=datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
fire_alpha = 0.25
fire_color = '#990000'
fig = plt.figure(figsize=(12,4))
hfmt = dates.DateFormatter('%b')
#hfmt = dates.DateFormatter('%m-%d')
display_month_interval = 1
startdate_2010 = '2010/05/31'
enddate_2010 = '2010/08/04'
startdate_2012 = '2012/03/29'
enddate_2012 = '2012/06/05'
colors = ['r','g','b','k','c','m','grey', 'orange', 'yellow']
ax7 = plt.subplot2grid((4,2), (0,0), colspan=1,rowspan = 2)
ax8 = plt.subplot2grid((4,2), (0,1), colspan=1,rowspan = 2, sharey=ax7)
ax9 = plt.subplot2grid((4,2), (2,0), colspan=1,rowspan = 2)
ax10 = plt.subplot2grid((4,2), (2,1), colspan=1,rowspan = 2, sharey=ax9)
ax7.scatter(GBPS_datetimes,GBPS_DpDc, marker = 'o', color = 'r', label='GBPS')
ax7.scatter(SPac_datetimes,SPac_DpDc, marker = 'o', color = 'g', label='SPac')
ax7.scatter(NPac_datetimes,NPac_DpDc, marker = 'o', color = 'c', label='NPac')
ax7.scatter(Cont_datetimes,Cont_DpDc, marker = 'o', color = 'm', label='NCan')
ax7.scatter(LRT_datetimes,LRT_DpDc, marker = 'o', color = 'b', label='WPac/Asia')
ax7.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax7.xaxis.set_major_formatter(hfmt)
ax7.xaxis.set_visible(False)
ax7.yaxis.set_visible(True)
ax7.set_ylabel('Dp/Dc')
ax7.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax7.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax7.text(0.1, 0.8,'2010', transform=ax7.transAxes)
ax7.set_ylim(0.8,3.5)
ax9.scatter(GBPS_datetimes,GBPS_MMD, marker = '*', color = 'r', label='GBPS')
ax9.scatter(SPac_datetimes,SPac_MMD, marker = '*', color = 'g', label='SPac')
ax9.scatter(NPac_datetimes,NPac_MMD, marker = '*', color = 'c', label='NPac')
ax9.scatter(Cont_datetimes,Cont_MMD, marker = '*', color = 'm', label='NCan')
ax9.scatter(LRT_datetimes, LRT_MMD, marker = '*', color = 'b', label='WPac/Asia')
ax9.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax9.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax9.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax9.xaxis.set_major_formatter(hfmt)
ax9.set_ylabel('MMD')
ax9.set_ylim(70,255)
ax8.scatter(GBPS_datetimes,GBPS_DpDc, marker = 'o', color = 'r', label='Georgia Basin/Puget Sound')
ax8.scatter(SPac_datetimes,SPac_DpDc, marker = 'o', color = 'g', label='S. Pacific')
ax8.scatter(NPac_datetimes,NPac_DpDc, marker = 'o', color = 'c', label='N. Pacific')
ax8.scatter(Cont_datetimes,Cont_DpDc, marker = 'o', color = 'm', label='N. Canada')
ax8.scatter(LRT_datetimes,LRT_DpDc, marker = 'o', color = 'b', label='W. Pacific/Asia')
ax8.xaxis.set_major_formatter(hfmt)
ax8.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax8.xaxis.set_visible(False)
ax8.yaxis.set_visible(True)
ax8.yaxis.tick_right()
ax8.set_xlabel('month')
ax8.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax8.text(0.1, 0.8,'2012', transform=ax8.transAxes)
ax8.set_ylim(0.8,3.5)
ax10.scatter(GBPS_datetimes,GBPS_MMD, marker = '*', color = 'r', label='GBPS')
ax10.scatter(SPac_datetimes,SPac_MMD, marker = '*', color = 'g', label='SPac')
ax10.scatter(NPac_datetimes,NPac_MMD, marker = '*', color = 'c', label='NPac')
ax10.scatter(Cont_datetimes,Cont_MMD, marker = '*', color = 'm', label='NCan')
ax10.scatter(LRT_datetimes, LRT_MMD, marker = '*', color = 'b', label='WPac/Asia')
ax10.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax10.xaxis.set_major_formatter(hfmt)
ax10.yaxis.tick_right()
ax10.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax10.set_ylim(70,255)
legend = ax8.legend(loc='upper right', bbox_to_anchor=(0.85, 1.6), ncol=3, numpoints=1)
plt.subplots_adjust(hspace=0.0)
plt.subplots_adjust(wspace=0.0)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
plt.savefig('timeseries-DpDc and MMD.png', bbox_extra_artists=(legend,), bbox_inches='tight')
plt.show()
| annahs/atmos_research | WHI_long_term_plot_MMD_and_DpDc.py | Python | mit | 7,204 |
#!/usr/bin/python -i
from circuits import Event, Component, Manager
class foo(Event):
"""foo Event"""
channels = ("a",)
class bar(Event):
"""bar Event"""
class A(Component):
channel = "a"
def foo(self):
return "Foo"
class B(Component):
channel = "b"
def foo(self):
return "Hello World!"
class C(Component):
channel = "c"
def foo(self):
return self.fire(bar())
def bar(self):
return "Bar"
def test():
m = Manager() + A() + B() + C()
while m:
m.flush()
# Rely on Event.channels
x = m.fire(foo())
m.flush()
assert x.value == "Foo"
# Explicitly specify the channel
x = m.fire(foo(), "b")
m.flush()
assert x.value == "Hello World!"
# Explicitly specify a set of channels
x = m.fire(foo(), "a", "b")
m.flush()
assert x.value == ["Foo", "Hello World!"]
# Rely on self.channel
x = m.fire(foo(), "c")
m.flush()
m.flush()
assert x.value == "Bar"
| treemo/circuits | tests/core/test_channel_selection.py | Python | mit | 1,020 |
import pytest
@pytest.fixture
def anchorless_yaml():
yield """
e:
f: f
h:
- j
- k
"""
@pytest.fixture
def anchored_yaml():
yield """
a: &anchor
b: c
d: 11
e:
<<: *anchor
f: f
h:
- j
- k
"""
| Cognexa/cxflow | cxflow/tests/conftest.py | Python | mit | 401 |
#!/usr/bin/env python3
import time
import sys
while True:
print("This is output!")
time.sleep(1)
| Thezomg/gsc | test.py | Python | mit | 107 |
# -*- coding: utf-8 -*-
"""
From this script, experiments for ImageNet pictures can be started.
See "configuration" below for the different possible settings.
The results are saved automatically to the folder ./results
It is recommended to run caffe in gpu mode when overlapping is set
to True, otherwise the calculation will take a very long time.
@author: Luisa M Zintgraf
"""
# the following is needed to avoid some error that can be thrown when
# using matplotlib.pyplot in a linux shell
import matplotlib
matplotlib.use('Agg')
# standard imports
import numpy as np
import time
import os
# most important script - relevance estimator
from prediction_difference_analysis import PredDiffAnalyser
# utilities
import utils_classifiers as utlC
import utils_data as utlD
import utils_sampling as utlS
import utils_visualise as utlV
import sensitivity_analysis_caffe as SA
# ------------------------ CONFIGURATION ------------------------
# -------------------> CHANGE SETTINGS HERE <--------------------
# pick neural network to run experiment for (alexnet, googlenet, vgg)
netname = 'googlenet'
# pick for which layers the explanations should be computed
# (names depend on network, output layer is usually called 'prob')
blobnames = ['prob']
#blobnames = ['conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8','prob'] # alexnet
#blobnames = ['conv1/7x7_s2', 'conv2/3x3_reduce', 'conv2/3x3', 'conv2/norm2', 'inception_3a/output', 'inception_3b/output', 'inception_4a/output', 'inception_4b/output', 'inception_4c/output', 'inception_4d/output', 'inception_4e/output','inception_5a/output', 'inception_5b/output', 'loss3/classifier', 'prob']
#blobnames = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7', 'fc8', 'prob'] # vgg
# is caffe running in gpu mode?
gpu = True
# pick image indices which are analysed (in alphabetical order as in the ./data folder) [0,1,2,...]
# (if None, all images in './data' will be analysed)
test_indices = None
# window size (i.e., the size of the pixel patch that is marginalised out in each step)
win_size = 10 # k in alg 1 (see paper)
# indicate whether windows should be overlapping or not
overlapping = True
# settings for sampling
sampl_style = 'conditional' # choose: conditional / marginal
num_samples = 10
padding_size = 2 # important for conditional sampling,
# l = win_size+2*padding_size in alg 1
# (see paper)
# set the batch size - the larger, the faster computation will be
# (if caffe crashes with memory error, reduce the batch size)
batch_size = 128
# ------------------------ SET-UP ------------------------
utlC.set_caffe_mode(gpu=gpu)
net = utlC.get_caffenet(netname)
# get the data
X_test, X_test_im, X_filenames = utlD.get_imagenet_data(net=net)
# get the label names of the 1000 ImageNet classes
classnames = utlD.get_imagenet_classnames()
if not test_indices:
test_indices = [i for i in range(X_test.shape[0])]
# make folder for saving the results if it doesn't exist
path_results = './results/'
if not os.path.exists(path_results):
os.makedirs(path_results)
# ------------------------ EXPERIMENTS ------------------------
# change the batch size of the network to the given value
net.blobs['data'].reshape(batch_size, X_test.shape[1], X_test.shape[2], X_test.shape[3])
# target function (mapping input features to output probabilities)
target_func = lambda x: utlC.forward_pass(net, x, blobnames)
# for the given test indices, do the prediction difference analysis
for test_idx in test_indices:
# get the specific image (preprocessed, can be used as input to the target function)
x_test = X_test[test_idx]
# get the image for plotting (not preprocessed)
x_test_im = X_test_im[test_idx]
# prediction of the network
y_pred = np.argmax(utlC.forward_pass(net, x_test, ['prob']))
y_pred_label = classnames[y_pred]
# get the path for saving the results
if sampl_style == 'conditional':
save_path = path_results+'{}_{}_winSize{}_condSampl_numSampl{}_paddSize{}_{}'.format(X_filenames[test_idx],y_pred_label,win_size,num_samples,padding_size,netname)
elif sampl_style == 'marginal':
save_path = path_results+'{}_{}_winSize{}_margSampl_numSampl{}_{}'.format(X_filenames[test_idx],y_pred_label,win_size,num_samples,netname)
if os.path.exists(save_path+'.npz'):
print 'Results for ', X_filenames[test_idx], ' exist, will move to the next image. '
continue
print "doing test...", "file :", X_filenames[test_idx], ", net:", netname, ", win_size:", win_size, ", sampling: ", sampl_style
# compute the sensitivity map
layer_name = net.blobs.keys()[-2] # look at penultimate layer (like in Simonyan et al. (2013))
sensMap = SA.get_sens_map(net, x_test[np.newaxis], layer_name, np.argmax(target_func(x_test)[-1][0]))
start_time = time.time()
if sampl_style == 'conditional':
sampler = utlS.cond_sampler_imagenet(win_size=win_size, padding_size=padding_size, image_dims=net.crop_dims, netname=netname)
elif sampl_style == 'marginal':
sampler = utlS.marg_sampler_imagenet(X_test, net)
pda = PredDiffAnalyser(x_test, target_func, sampler, num_samples=num_samples, batch_size=batch_size)
pred_diff = pda.get_rel_vect(win_size=win_size, overlap=overlapping)
# plot and save the results
utlV.plot_results(x_test, x_test_im, sensMap, pred_diff[0], target_func, classnames, test_idx, save_path)
np.savez(save_path, *pred_diff)
print "--- Total computation took {:.4f} minutes ---".format((time.time() - start_time)/60)
| lmzintgraf/DeepVis-PredDiff | experiments_imagenet.py | Python | mit | 5,870 |
from rest_framework.serializers import ModelSerializer
from gcm.utils import get_device_model
Device = get_device_model()
class DeviceSerializer(ModelSerializer):
class Meta:
model = Device
exclude = ('id', 'creation_date', 'modified_date', 'is_active')
| hugobrilhante/django-gcm-android-ios | gcm/serializers.py | Python | mit | 278 |
#!/usr/local/bin/python
import re
import os
refsbib = open('refs.bib', 'r').read()
p = re.compile('@.+{(.*),')
g = p.findall(refsbib)
for f in g:
name = f + '.pdf'
if os.path.isfile(os.path.join('./live/files/', name)):
print("[OK] %s ready" % f)
else:
print("[--] %s not found" % f)
| lukeolson/lukeo.cs.illinois.edu | check-files.py | Python | mit | 315 |
"""Implementations of authorization abstract base class query_inspectors."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class AuthorizationQueryInspector:
"""The query inspector for examining authorization queries."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_explicit_authorizations_terms(self):
"""Gets the explicit authorization query terms.
:return: the query terms
:rtype: ``osid.search.terms.BooleanTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.BooleanTerm
explicit_authorizations_terms = property(fget=get_explicit_authorizations_terms)
@abc.abstractmethod
def get_related_authorization_id_terms(self):
"""Gets the related authorization ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
related_authorization_id_terms = property(fget=get_related_authorization_id_terms)
@abc.abstractmethod
def get_related_authorization_terms(self):
"""Gets the related authorization query terms.
:return: the query terms
:rtype: ``osid.authorization.AuthorizationQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.AuthorizationQueryInspector
related_authorization_terms = property(fget=get_related_authorization_terms)
@abc.abstractmethod
def get_resource_id_terms(self):
"""Gets the resource ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
resource_id_terms = property(fget=get_resource_id_terms)
@abc.abstractmethod
def get_resource_terms(self):
"""Gets the resource query terms.
:return: the query terms
:rtype: ``osid.resource.ResourceQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.resource.ResourceQueryInspector
resource_terms = property(fget=get_resource_terms)
@abc.abstractmethod
def get_trust_id_terms(self):
"""Gets the trust ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
trust_id_terms = property(fget=get_trust_id_terms)
@abc.abstractmethod
def get_agent_id_terms(self):
"""Gets the agent ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
agent_id_terms = property(fget=get_agent_id_terms)
@abc.abstractmethod
def get_agent_terms(self):
"""Gets the agent query terms.
:return: the query terms
:rtype: ``osid.authentication.AgentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authentication.AgentQueryInspector
agent_terms = property(fget=get_agent_terms)
@abc.abstractmethod
def get_function_id_terms(self):
"""Gets the function ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
function_id_terms = property(fget=get_function_id_terms)
@abc.abstractmethod
def get_function_terms(self):
"""Gets the function query terms.
:return: the query terms
:rtype: ``osid.authorization.FunctionQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.FunctionQueryInspector
function_terms = property(fget=get_function_terms)
@abc.abstractmethod
def get_qualifier_id_terms(self):
"""Gets the qualifier ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
qualifier_id_terms = property(fget=get_qualifier_id_terms)
@abc.abstractmethod
def get_qualifier_terms(self):
"""Gets the qualifier query terms.
:return: the query terms
:rtype: ``osid.authorization.QualifierQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.QualifierQueryInspector
qualifier_terms = property(fget=get_qualifier_terms)
@abc.abstractmethod
def get_vault_id_terms(self):
"""Gets the vault ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
vault_id_terms = property(fget=get_vault_id_terms)
@abc.abstractmethod
def get_vault_terms(self):
"""Gets the vault query terms.
:return: the query terms
:rtype: ``osid.authorization.VaultQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.VaultQueryInspector
vault_terms = property(fget=get_vault_terms)
@abc.abstractmethod
def get_authorization_query_inspector_record(self, authorization_record_type):
"""Gets the authorization query inspector record corresponding to the given ``Authorization`` record ``Type``.
:param authorization_record_type: an authorization record type
:type authorization_record_type: ``osid.type.Type``
:return: the authorization query inspector record
:rtype: ``osid.authorization.records.AuthorizationQueryInspectorRecord``
:raise: ``NullArgument`` -- ``authorization_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(authorization_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.records.AuthorizationQueryInspectorRecord
class FunctionQueryInspector:
"""This is the query inspector for examining function queries."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_qualifier_hierarchy_id_terms(self):
"""Gets the qualifier hierarchy ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
qualifier_hierarchy_id_terms = property(fget=get_qualifier_hierarchy_id_terms)
@abc.abstractmethod
def get_qualifier_hierarchy_terms(self):
"""Gets the qualifier hierarchy query terms.
:return: the query terms
:rtype: ``osid.hierarchy.HierarchyQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.HierarchyQueryInspector
qualifier_hierarchy_terms = property(fget=get_qualifier_hierarchy_terms)
@abc.abstractmethod
def get_authorization_id_terms(self):
"""Gets the authorization ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
authorization_id_terms = property(fget=get_authorization_id_terms)
@abc.abstractmethod
def get_authorization_terms(self):
"""Gets the authorization query terms.
:return: the query terms
:rtype: ``osid.authorization.AuthorizationQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.AuthorizationQueryInspector
authorization_terms = property(fget=get_authorization_terms)
@abc.abstractmethod
def get_vault_id_terms(self):
"""Gets the vault ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
vault_id_terms = property(fget=get_vault_id_terms)
@abc.abstractmethod
def get_vault_terms(self):
"""Gets the vault query terms.
:return: the query terms
:rtype: ``osid.authorization.VaultQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.VaultQueryInspector
vault_terms = property(fget=get_vault_terms)
@abc.abstractmethod
def get_function_query_inspector_record(self, function_record_type):
"""Gets the function query inspector record corresponding to the given ``Function`` record ``Type``.
:param function_record_type: a function record type
:type function_record_type: ``osid.type.Type``
:return: the function query inspector record
:rtype: ``osid.authorization.records.FunctionQueryInspectorRecord``
:raise: ``NullArgument`` -- ``function_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(function_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.records.FunctionQueryInspectorRecord
class QualifierQueryInspector:
"""This is the query inspector for examining qualifiers queries."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_qualifier_hierarchy_id_terms(self):
"""Gets the qualifier hierarchy ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
qualifier_hierarchy_id_terms = property(fget=get_qualifier_hierarchy_id_terms)
@abc.abstractmethod
def get_qualifier_hierarchy_terms(self):
"""Gets the qualifier hierarchy query terms.
:return: the query terms
:rtype: ``osid.hierarchy.HierarchyQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.HierarchyQueryInspector
qualifier_hierarchy_terms = property(fget=get_qualifier_hierarchy_terms)
@abc.abstractmethod
def get_authorization_id_terms(self):
"""Gets the authorization ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
authorization_id_terms = property(fget=get_authorization_id_terms)
@abc.abstractmethod
def get_authorization_terms(self):
"""Gets the authorization query terms.
:return: the query terms
:rtype: ``osid.authorization.AuthorizationQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.AuthorizationQueryInspector
authorization_terms = property(fget=get_authorization_terms)
@abc.abstractmethod
def get_ancestor_qualifier_id_terms(self):
"""Gets the ancestor qualifier ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
ancestor_qualifier_id_terms = property(fget=get_ancestor_qualifier_id_terms)
@abc.abstractmethod
def get_ancestor_qualifier_terms(self):
"""Gets the ancestor qualifier query terms.
:return: the query terms
:rtype: ``osid.authorization.FunctionQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.FunctionQueryInspector
ancestor_qualifier_terms = property(fget=get_ancestor_qualifier_terms)
@abc.abstractmethod
def get_descendant_qualifier_id_terms(self):
"""Gets the descendant qualifier ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
descendant_qualifier_id_terms = property(fget=get_descendant_qualifier_id_terms)
@abc.abstractmethod
def get_descendant_qualifier_terms(self):
"""Gets the descendant qualifier query terms.
:return: the query terms
:rtype: ``osid.authorization.FunctionQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.FunctionQueryInspector
descendant_qualifier_terms = property(fget=get_descendant_qualifier_terms)
@abc.abstractmethod
def get_vault_id_terms(self):
"""Gets the vault ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
vault_id_terms = property(fget=get_vault_id_terms)
@abc.abstractmethod
def get_vault_terms(self):
"""Gets the vault query terms.
:return: the query terms
:rtype: ``osid.authorization.VaultQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.VaultQueryInspector
vault_terms = property(fget=get_vault_terms)
@abc.abstractmethod
def get_qualifier_query_inspector_record(self, qualifier_record_type):
"""Gets the qualifier query inspector record corresponding to the given ``Qualifier`` record ``Type``.
:param qualifier_record_type: a qualifier query inspector record type
:type qualifier_record_type: ``osid.type.Type``
:return: the qualifier query inspector record
:rtype: ``osid.authorization.records.QualifierQueryInspectorRecord``
:raise: ``NullArgument`` -- ``qualifier_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(qualifier_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.records.QualifierQueryInspectorRecord
class VaultQueryInspector:
"""This is the query inspector for examining vault queries."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_function_id_terms(self):
"""Gets the function ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
function_id_terms = property(fget=get_function_id_terms)
@abc.abstractmethod
def get_function_terms(self):
"""Gets the function query terms.
:return: the query terms
:rtype: ``osid.authorization.FunctionQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.FunctionQueryInspector
function_terms = property(fget=get_function_terms)
@abc.abstractmethod
def get_qualifier_id_terms(self):
"""Gets the qualifier ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
qualifier_id_terms = property(fget=get_qualifier_id_terms)
@abc.abstractmethod
def get_qualifier_terms(self):
"""Gets the qualifier query terms.
:return: the query terms
:rtype: ``osid.authorization.QualifierQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.QualifierQueryInspector
qualifier_terms = property(fget=get_qualifier_terms)
@abc.abstractmethod
def get_authorization_id_terms(self):
"""Gets the authorization ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
authorization_id_terms = property(fget=get_authorization_id_terms)
@abc.abstractmethod
def get_authorization_terms(self):
"""Gets the authorization query terms.
:return: the query terms
:rtype: ``osid.authorization.AuthorizationQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.AuthorizationQueryInspector
authorization_terms = property(fget=get_authorization_terms)
@abc.abstractmethod
def get_ancestor_vault_id_terms(self):
"""Gets the ancestor vault ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
ancestor_vault_id_terms = property(fget=get_ancestor_vault_id_terms)
@abc.abstractmethod
def get_ancestor_vault_terms(self):
"""Gets the ancestor vault query terms.
:return: the query terms
:rtype: ``osid.authorization.VaultQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.VaultQueryInspector
ancestor_vault_terms = property(fget=get_ancestor_vault_terms)
@abc.abstractmethod
def get_descendant_vault_id_terms(self):
"""Gets the descendant vault ``Id`` query terms.
:return: the query terms
:rtype: ``osid.search.terms.IdTerm``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.search.terms.IdTerm
descendant_vault_id_terms = property(fget=get_descendant_vault_id_terms)
@abc.abstractmethod
def get_descendant_vault_terms(self):
"""Gets the descendant vault query terms.
:return: the query terms
:rtype: ``osid.authorization.VaultQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.VaultQueryInspector
descendant_vault_terms = property(fget=get_descendant_vault_terms)
@abc.abstractmethod
def get_vault_query_inspector_record(self, vault_record_type):
"""Gets the vault query inspector record corresponding to the given ``Vault`` record ``Type``.
:param vault_record_type: a vault query inspector record type
:type vault_record_type: ``osid.type.Type``
:return: the vault query inspector record
:rtype: ``osid.authorization.records.VaultQueryInspectorRecord``
:raise: ``NullArgument`` -- ``vault_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(vault_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.authorization.records.VaultQueryInspectorRecord
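# Illustrative only (not part of the OSID specification): given a concrete
# VaultQueryInspector obtained from a search result or query session (not
# shown in this abstract module), the terms would be examined like so:
#   for term in inspector.get_function_id_terms():
#       ...  # each term is an osid.search.terms.IdTerm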
|
mitsei/dlkit
|
dlkit/abstract_osid/authorization/query_inspectors.py
|
Python
|
mit
| 20,768
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Bitcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
import bitdeal_scrypt
from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
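# Example round-trips of the CompactSize (varint) encoding used throughout
# the wire format (illustrative values covering each size class):
#   ser_compact_size(252)     == b'\xfc'
#   ser_compact_size(253)     == b'\xfd\xfd\x00'
#   ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00'
#   deser_compact_size(BytesIO(ser_compact_size(n))) == n  for any valid n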
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
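# Example (illustrative): the regtest difficulty 0x207fffff unpacks as
# mantissa 0x7fffff with 0x20 (32) significant bytes, i.e.
#   uint256_from_compact(0x207fffff) == 0x7fffff << (8 * 29)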
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
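# Example (illustrative): round-trip a non-witness transaction through its
# RPC hex form (tx_hex e.g. as returned by getrawtransaction):
#   tx = FromHex(CTransaction(), tx_hex)
#   assert ToHex(tx) == tx_hex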
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
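# Per BIP 144, inv/getdata type values with this bit set ask the peer for the
# witness serialization of the announced object.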
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
        2|MSG_WITNESS_FLAG: "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 84000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(bitdeal_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
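    # Pairs of hashes are concatenated and double-SHA256'd level by level;
    # when a level has an odd count, the last hash is paired with itself
    # (hence the min(i+1, len(hashes)-1) below).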
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
    def __init__(self, index=0, tx=None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
    def __init__(self, p2pheaders_and_shortids=None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        # Use a None sentinel so the default list is not shared across calls
        prefill_list = prefill_list if prefill_list is not None else [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
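# Usage sketch (illustrative; `block` and `conn` are assumed to exist, with
# the block's transaction sha256 fields already populated): build a
# version-1 compact block with only the coinbase prefilled and send it.
#   comp = HeaderAndShortIDs()
#   comp.initialize_from_block(block)
#   conn.send_message(msg_cmpctblock(comp.to_p2p()))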
class BlockTransactionsRequest(object):
    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
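    # Example (illustrative): absolute indexes [1, 3, 4] are stored
    # differentially as [1, 1, 0]; to_absolute() inverts from_absolute().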
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
    def __init__(self, blockhash=0, transactions=None):
        self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a mutable default CTransaction shared across msg_tx instances
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
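# Example (illustrative; `testnode` is an assumed NodeConnCB instance):
# poll a shared flag under mininode_lock every 50 ms for roughly 5 seconds.
#   assert wait_until(lambda: testnode.verack_received, timeout=5)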
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet3": b"\xfc\xc1\xb7\xdc", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Bitdeal Node IP # ' + dstaddr + ':' \
+ str(dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
                    self.show_debug_msg("Unknown command: %r %s"
                                        % (command, repr(msg)))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
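    # Wire framing shared by send_message() and got_data():
    #   magic (4) | command (12, NUL-padded) | payload length (uint32 LE)
    #   | checksum (4, first bytes of double-SHA256; only for ver >= 209)
    #   | payload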
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
            for obj in disconnected:
                obj.handle_close()
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
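# Typical wiring (a sketch; the host/port values are assumptions for a
# regtest node, and `rpc` is an assumed RPC proxy):
#   cb = SingleNodeConnCB()
#   conn = NodeConn('127.0.0.1', 18444, rpc, cb)
#   cb.add_connection(conn)
#   NetworkThread().start()
#   cb.wait_for_verack()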
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
bitdeal/bitdeal
|
qa/rpc-tests/test_framework/mininode.py
|
Python
|
mit
| 54,390
|
from munch import munchify
import requests
import arrow
from . import BASE_URL
EVENTS_URL = (BASE_URL + "/events?page=1&limit=40&timezone=America/New_York"
"&sort_direction=ASC&sub_types=SportEvent,MediaEvent"
"&sort=eventDate&start_date={}")
SCHEDULE_URL = (BASE_URL + "/events?page=1&limit=30&timezone=America/New_York"
"&sort_direction=ASC&sub_types=SportEvent,MediaEvent"
"&sort=eventDate&sport_codes={}&include_paging=true"
"&map_method=map_schedule&seasons={}")
def events():
"""Fetch events for the current day"""
date = arrow.now().format('YYYY-MM-DD')
uri = EVENTS_URL.format(date)
return munchify(requests.get(uri).json())
def get_schedule(sport_code, year):
"""Get the yearly schedule for a sport code given year in YYYY format"""
uri = SCHEDULE_URL.format(sport_code, year)
data = requests.get(uri).json()["content"]
return munchify(data)
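# Example usage (illustrative; the sport code 'MBB' is an assumption):
#   todays_events = events()
#   schedule = get_schedule('MBB', 2016)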
|
pennlabs/pennathletics
|
pennathletics/events.py
|
Python
|
mit
| 965
|
# -*- coding: utf-8 -*-
"""
Folium Tests
-------
"""
import pytest
import os
import json
try:
from unittest import mock
except ImportError:
import mock
import pandas as pd
import jinja2
from jinja2 import Environment, PackageLoader
import vincent
import folium
import base64
from folium.six import PY3
from folium.map import Popup, Marker, FitBounds, FeatureGroup
from folium.features import GeoJson, TopoJson, PolyLine, MultiPolyLine
from folium.colormap import ColorMap
from folium.plugins import ImageOverlay
rootpath = os.path.abspath(os.path.dirname(__file__))
def setup_data():
"""Import economic data for testing."""
with open(os.path.join(rootpath, 'us-counties.json'), 'r') as f:
get_id = json.load(f)
county_codes = [x['id'] for x in get_id['features']]
county_df = pd.DataFrame({'FIPS_Code': county_codes}, dtype=str)
# Read into Dataframe, cast to string for consistency.
df = pd.read_csv(os.path.join(rootpath, 'us_county_data.csv'),
na_values=[' '])
df['FIPS_Code'] = df['FIPS_Code'].astype(str)
# Perform an inner join, pad NA's with data from nearest county.
merged = pd.merge(df, county_df, on='FIPS_Code', how='inner')
return merged.fillna(method='pad')
def test_get_templates():
"""Test template getting."""
env = folium.utilities.get_templates()
assert isinstance(env, jinja2.environment.Environment)
class TestFolium(object):
"""Test class for the Folium library."""
def setup(self):
"""Setup Folium Map."""
with mock.patch('folium.element.uuid4') as uuid4:
uuid4().hex = '0' * 32
self.map = folium.Map(location=[45.5236, -122.6750], width=900,
height=400, max_zoom=20, zoom_start=4)
self.env = Environment(loader=PackageLoader('folium', 'templates'))
def test_init(self):
"""Test map initialization."""
assert self.map.get_name() == 'map_00000000000000000000000000000000'
assert self.map.get_root() == self.map._parent
assert self.map.location == [45.5236, -122.6750]
assert self.map.zoom_start == 4
assert self.map.max_lat == 90
assert self.map.min_lat == -90
assert self.map.max_lon == 180
assert self.map.min_lon == -180
assert self.map.position == 'relative'
assert self.map.height == (400, 'px')
assert self.map.width == (900, 'px')
assert self.map.left == (0, '%')
assert self.map.top == (0, '%')
assert self.map.to_dict() == {
"name": "Map",
"id": "00000000000000000000000000000000",
"children": {
"openstreetmap": {
"name": "TileLayer",
"id": "00000000000000000000000000000000",
"children": {}
}
}
}
def test_cloudmade(self):
"""Test cloudmade tiles and the API key."""
with pytest.raises(ValueError):
folium.Map(location=[45.5236, -122.6750], tiles='cloudmade')
map = folium.Map(location=[45.5236, -122.6750], tiles='cloudmade',
API_key='###')
cloudmade = 'http://{s}.tile.cloudmade.com/###/997/256/{z}/{x}/{y}.png'
assert map._children['cloudmade'].tiles == cloudmade
bounds = map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_builtin_tile(self):
"""Test custom maptiles."""
default_tiles = ['OpenStreetMap', 'Stamen Terrain', 'Stamen Toner']
for tiles in default_tiles:
map = folium.Map(location=[45.5236, -122.6750], tiles=tiles)
tiles = ''.join(tiles.lower().strip().split())
url = 'tiles/{}/tiles.txt'.format
attr = 'tiles/{}/attr.txt'.format
url = map._env.get_template(url(tiles)).render()
attr = map._env.get_template(attr(tiles)).render()
assert map._children[tiles].tiles == url
assert map._children[tiles].attr == attr
bounds = map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_custom_tile(self):
"""Test custom tile URLs."""
url = 'http://{s}.custom_tiles.org/{z}/{x}/{y}.png'
attr = 'Attribution for custom tiles'
with pytest.raises(ValueError):
folium.Map(location=[45.5236, -122.6750], tiles=url)
map = folium.Map(location=[45.52, -122.67], tiles=url, attr=attr)
assert map._children[url].tiles == url
assert map._children[url].attr == attr
bounds = map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_wms_layer(self):
"""Test WMS layer URLs."""
map = folium.Map(location=[44, -73], zoom_start=3)
wms_url = 'http://gis.srh.noaa.gov/arcgis/services/NDFDTemps/'
wms_url += 'MapServer/WMSServer'
wms_name = "Temperature"
wms_layers = 16
wms_format = "image/png"
map.add_wms_layer(wms_name=wms_name,
wms_url=wms_url,
wms_format=wms_format,
wms_layers=wms_layers,
wms_transparent=True)
wms_temp = self.env.get_template('wms_layer.js')
wms = wms_temp.render({'wms_name': map._children[wms_name].get_name(),
'wms_url': wms_url,
'wms_format': wms_format,
'wms_layer_names': wms_layers,
'wms_transparent': 'true'})
assert (''.join(wms.split())[:-1] in
''.join(map.get_root().render().split()))
bounds = map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_feature_group(self):
"""Test FeatureGroup."""
map = folium.Map()
feature_group = FeatureGroup()
feature_group.add_children(Marker([45, -30], popup=Popup('-30')))
feature_group.add_children(Marker([45, 30], popup=Popup('30')))
map.add_children(feature_group)
map.add_children(folium.map.LayerControl())
map._repr_html_()
bounds = map.get_bounds()
assert bounds == [[45, -30], [45, 30]], bounds
def test_simple_marker(self):
"""Test simple marker addition."""
self.map = folium.Map(location=[44, -73], zoom_start=3)
mark_templ = self.env.get_template('simple_marker.js')
popup_templ = self.env.get_template('simple_popup.js')
# Single Simple marker.
self.map.simple_marker(location=[45.50, -122.7])
marker_1 = list(self.map._children.values())[-1]
mark_1 = mark_templ.render({'marker': marker_1.get_name(),
'lat': 45.50,
'lon': -122.7,
'icon': "{icon:new L.Icon.Default()}"})
assert (''.join(mark_1.split())[:-1] in
''.join(self.map.get_root().render().split()))
bounds = self.map.get_bounds()
assert bounds == [[45.5, -122.7], [45.5, -122.7]], bounds
# Test Simple marker addition.
self.map.simple_marker(location=[45.60, -122.8], popup='Hi')
marker_2 = list(self.map._children.values())[-1]
popup_2 = list(marker_2._children.values())[-1]
html_2 = list(popup_2.html._children.values())[0]
mark_2 = mark_templ.render({'marker': marker_2.get_name(),
'lat': 45.60,
'lon': -122.8,
'icon': "{icon:new L.Icon.Default()}"})
pop_2 = popup_templ.render({'pop_name': popup_2.get_name(),
'pop_txt': 'Hi',
'html_name': html_2.get_name(),
'width': 300})
# assert self.map.mark_cnt['simple'] == 2
assert (''.join(mark_2.split())[:-1] in
''.join(self.map.get_root().render().split()))
assert (''.join(pop_2.split())[:-1] in
''.join(self.map.get_root().render().split()))
# assert self.map.template_vars['custom_markers'][1][2] == pop_2
# Test no popup.
self.map.simple_marker(location=[45.60, -122.8])
for child in list(self.map._children.values())[-1]._children.values():
assert not isinstance(child, Popup)
bounds = self.map.get_bounds()
assert bounds == [[45.5, -122.8], [45.6, -122.7]], bounds
def test_circle_marker(self):
"""Test circle marker additions."""
self.map = folium.Map(location=[45.60, -122.8])
circ_templ = self.env.get_template('circle_marker.js')
# Single Circle marker.
self.map.circle_marker(location=[45.60, -122.8], popup='Hi')
marker = list(self.map._children.values())[-1]
circle_1 = circ_templ.render({'circle': marker.get_name(),
'lat': 45.60,
'lon': -122.8, 'radius': 500,
'line_color': 'black',
'fill_color': 'black',
'fill_opacity': 0.6})
assert (''.join(circle_1.split())[:-1] in
''.join(self.map.get_root().render().split()))
# Second circle marker.
self.map.circle_marker(location=[45.70, -122.9], popup='Hi')
marker = list(self.map._children.values())[-1]
circle_2 = circ_templ.render({'circle': marker.get_name(),
'lat': 45.70,
'lon': -122.9, 'radius': 500,
'line_color': 'black',
'fill_color': 'black',
'fill_opacity': 0.6})
assert (''.join(circle_2.split())[:-1] in
''.join(self.map.get_root().render().split()))
bounds = self.map.get_bounds()
assert bounds == [[45.6, -122.9], [45.7, -122.8]], bounds
def test_poly_marker(self):
"""Test polygon marker."""
self.map = folium.Map(location=[45.5, -122.5])
poly_temp = self.env.get_template('poly_marker.js')
self.map.polygon_marker(location=[45.5, -122.5])
marker = list(self.map._children.values())[-1]
polygon = poly_temp.render({'marker': marker.get_name(),
'lat': 45.5,
'lon': -122.5,
'line_color': 'black',
'line_opacity': 1,
'line_weight': 2,
'fill_color': 'blue',
'fill_opacity': 1,
'num_sides': 4,
'rotation': 0,
'radius': 15})
        assert ((''.join(polygon.split()))[:-1] in
''.join(self.map.get_root().render().split()))
bounds = self.map.get_bounds()
assert bounds == [[45.5, -122.5], [45.5, -122.5]], bounds
def test_latlng_pop(self):
"""Test lat/lon popovers."""
self.map.lat_lng_popover()
pop = list(self.map._children.values())[-1]
tmpl = 'lat_lng_popover.js'
pop_templ = self.env.get_template(tmpl).render(popup=pop.get_name(),
map=self.map.get_name())
assert ((''.join(pop_templ.split()))[:-1] in
''.join(self.map.get_root().render().split()))
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_click_for_marker(self):
"""Test click for marker functionality."""
# Lat/lon popover.
self.map = folium.Map([46, 3])
self.map.click_for_marker()
click_templ = self.env.get_template('click_for_marker.js')
click = click_templ.render({'popup': ('"Latitude: " + lat + "<br>'
'Longitude: " + lng '),
'map': self.map.get_name()})
assert ((''.join(click.split()))[:-1] in
''.join(self.map.get_root().render().split()))
# Custom popover.
self.map.click_for_marker(popup='Test')
click_templ = self.env.get_template('click_for_marker.js')
click = click_templ.render({'popup': '"Test"',
'map': self.map.get_name()})
assert ((''.join(click.split()))[:-1] in
''.join(self.map.get_root().render().split()))
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_vega_popup(self):
"""Test vega popups."""
self.map = folium.Map([45.60, -122.8])
vega_templ = self.env.get_template('vega_marker.js')
vega_parse = self.env.get_template('vega_parse.js')
vis = vincent.Bar(width=675 - 75, height=350 - 50, no_data=True)
data = json.loads(vis.to_json())
self.map.simple_marker(location=[45.60, -122.8],
popup=(vis, 'vis.json'))
marker = list(self.map._children.values())[-1]
popup = list(marker._children.values())[-1]
vega = list(popup._children.values())[-1]
vega_str = vega_templ.render({'vega': vega.get_name(),
'popup': popup.get_name(),
'marker': marker.get_name(),
'vega_json': json.dumps(data),
})
out = ''.join(self.map.get_root().render().split())
assert ''.join(vega_parse.render().split()) in out
assert (''.join(vega_str.split()))[:-1] in out
bounds = self.map.get_bounds()
assert bounds == [[45.6, -122.8], [45.6, -122.8]], bounds
def test_geo_json_simple(self):
"""Test geojson method."""
# No data binding.
self.map = folium.Map([43, -100], zoom_start=4)
path = os.path.join(rootpath, 'us-counties.json')
self.map.geo_json(geo_path=path)
self.map._repr_html_()
bounds = self.map.get_bounds()
assert bounds == [[18.948267, -171.742517],
[71.285909, -66.979601]], bounds
def test_geo_json_str(self):
# No data binding.
self.map = folium.Map([43, -100], zoom_start=4)
path = os.path.join(rootpath, 'us-counties.json')
data = json.load(open(path))
for feature in data['features']:
feature.setdefault('properties', {}).setdefault('style', {}).update({ # noqa
'color': 'black',
'opacity': 1,
'fillOpacity': 0.6,
'weight': 1,
'fillColor': 'blue',
})
self.map.geo_json(geo_str=json.dumps(data))
geo_json = [x for x in self.map._children.values() if
isinstance(x, GeoJson)][0]
out = ''.join(self.map._parent.render().split())
# Verify the geo_json object
obj_temp = jinja2.Template("""
var {{ this.get_name() }} = L.geoJson({{ this.style_data() }})
.addTo({{ this._parent.get_name() }});
{{ this.get_name() }}.setStyle(function(feature) {return feature.properties.style;});
""") # noqa
obj = obj_temp.render(this=geo_json, json=json)
assert ''.join(obj.split())[:-1] in out
bounds = self.map.get_bounds()
assert bounds == [[18.948267, -171.742517],
[71.285909, -66.979601]], bounds
def test_geo_json_bad_color(self):
"""Test geojson method."""
self.map = folium.Map([43, -100], zoom_start=4)
path = os.path.join(rootpath, 'us-counties.json')
# Data binding incorrect color value error.
data = setup_data()
with pytest.raises(ValueError):
self.map.geo_json(path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='blue')
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_geo_json_bad_threshold_scale(self):
"""Test geojson method."""
self.map = folium.Map([43, -100], zoom_start=4)
path = os.path.join(rootpath, 'us-counties.json')
# Data binding threshold_scale too long.
data = setup_data()
with pytest.raises(ValueError):
self.map.geo_json(path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id',
threshold_scale=[1, 2, 3, 4, 5, 6, 7],
fill_color='YlGnBu')
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_geo_json_data_binding(self):
"""Test geojson method."""
data = setup_data()
self.map = folium.Map([43, -100], zoom_start=4)
path = os.path.join(rootpath, 'us-counties.json')
# With DataFrame data binding, default threshold scale.
self.map.choropleth(geo_path=path, data=data,
threshold_scale=[4.0, 1000.0, 3000.0,
5000.0, 9000.0],
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='YlGnBu',
reset=True)
out = self.map._parent.render()
# Verify the colorscale
domain = [4.0, 1000.0, 3000.0, 5000.0, 9000.0]
palette = folium.utilities.color_brewer('YlGnBu')
d3range = palette[0: len(domain) + 2]
colorscale_obj = [val for key, val in self.map._children.items() if
isinstance(val, ColorMap)][0]
colorscale_temp = self.env.get_template('color_scale.js')
colorscale = colorscale_temp.render({
'this': colorscale_obj,
'domain': domain,
'range': d3range})
assert ''.join(colorscale.split())[:-1] in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[18.948267, -171.742517],
[71.285909, -66.979601]], bounds
def test_topo_json(self):
"""Test geojson method."""
self.map = folium.Map([43, -100], zoom_start=4)
# Adding TopoJSON as additional layer.
path = os.path.join(rootpath, 'or_counties_topo.json')
self.map.geo_json(geo_path=path, topojson='objects.or_counties_geo')
out = self.map._parent.render()
# Verify TopoJson
topo_json = [val for key, val in self.map._children.items()
if isinstance(val, TopoJson)][0]
topojson_str = topo_json._template.module.script(topo_json)
assert ''.join(topojson_str.split())[:-1] in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[-124.56617536999985, 41.99187135900012],
[-116.46422312599977, 46.28768217800006]], bounds
def test_map_build(self):
"""Test map build."""
# Standard map.
self.setup()
out = self.map._parent.render()
html_templ = self.env.get_template('fol_template.html')
attr = ('Data by <a href="http://openstreetmap.org">OpenStreetMap'
'</a>,under '
'<a href="http://www.openstreetmap.org/copyright">ODbL</a>.')
tile_layers = [
{'id': 'tile_layer_'+'0'*32,
'address': 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'attr': attr,
'max_zoom': 20,
'min_zoom': 1,
'detect_retina': False,
}]
tmpl = {'map_id': 'map_' + '0' * 32,
'lat': 45.5236, 'lon': -122.675,
'size': 'width: 900.0px; height: 400.0px;',
'zoom_level': 4,
'min_lat': -90,
'max_lat': 90,
'min_lon': -180,
'max_lon': 180,
'tile_layers': tile_layers,
'crs': 'EPSG3857',
}
HTML = html_templ.render(tmpl, plugins={})
assert ''.join(out.split()) == ''.join(HTML.split())
def test_tile_attr_unicode(self):
"""Test tile attribution unicode
This test does not cover b'юникод',
because in Python 3 bytes literals can only contain ASCII characters.
"""
if not PY3:
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr=b'unicode')
map._parent.render()
else:
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr=u'юникод')
map._parent.render()
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr='юникод')
map._parent.render()
def test_create_map(self):
"""Test create map."""
map = folium.Map(location=[45.5236, -122.6750],
tiles='test', attr='юникод')
# Add json data.
path = os.path.join(rootpath, 'us-counties.json')
data = setup_data()
map.geo_json(geo_path=path, data=data,
columns=['FIPS_Code', 'Unemployed_2011'],
key_on='feature.id', fill_color='YlGnBu',
reset=True)
# Add plugins.
map.polygon_marker(location=[45.5, -122.5])
# Test write.
map._parent.render()
map.save('map.html')
def test_line(self):
"""Test line."""
line_temp = self.env.get_template('polyline.js')
line_opts = {
'color': 'blue',
'weight': 2,
'opacity': 1
}
locations = [
[[45.5236, -122.6750], [45.5236, -122.6751]],
[[45.5237, -122.6750], [45.5237, -122.6751]],
[[45.5238, -122.6750], [45.5238, -122.6751]]
]
self.setup()
self.map.line(locations=locations,
line_color=line_opts['color'],
line_weight=line_opts['weight'],
line_opacity=line_opts['opacity'])
polyline = [val for key, val in self.map._children.items()
if isinstance(val, PolyLine)][0]
out = self.map._parent.render()
line_rendered = line_temp.render({'line': 'line_1',
'this': polyline,
'locations': locations,
'options': line_opts})
assert ''.join(line_rendered.split()) in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[45.5236, -122.6751], [45.5238, -122.675]], bounds
def test_multi_polyline(self):
"""Test multi_polyline."""
multiline_temp = self.env.get_template('multi_polyline.js')
multiline_opts = {'color': 'blue',
'weight': 2,
'opacity': 1}
locations = [[[45.5236, -122.6750], [45.5236, -122.6751]],
[[45.5237, -122.6750], [45.5237, -122.6751]],
[[45.5238, -122.6750], [45.5238, -122.6751]]]
self.setup()
self.map.multiline(locations=locations,
line_color=multiline_opts['color'],
line_weight=multiline_opts['weight'],
line_opacity=multiline_opts['opacity'])
multipolyline = [val for key, val in self.map._children.items()
if isinstance(val, MultiPolyLine)][0]
out = self.map._parent.render()
multiline_rendered = multiline_temp.render({'multiline': 'multiline_1',
'this': multipolyline,
'locations': locations,
'options': multiline_opts})
assert ''.join(multiline_rendered.split()) in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[45.5236, -122.6751], [45.5238, -122.675]], bounds
def test_fit_bounds(self):
"""Test fit_bounds."""
bounds = ((52.193636, -2.221575), (52.636878, -1.139759))
self.setup()
self.map.fit_bounds(bounds)
fitbounds = [val for key, val in self.map._children.items() if
isinstance(val, FitBounds)][0]
out = self.map._parent.render()
fit_bounds_tpl = self.env.get_template('fit_bounds.js')
fit_bounds_rendered = fit_bounds_tpl.render({
'bounds': json.dumps(bounds),
'this': fitbounds,
'fit_bounds_options': {}, })
assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split())
self.setup()
self.map.fit_bounds(bounds, max_zoom=15, padding=(3, 3))
fitbounds = [val for key, val in self.map._children.items() if
isinstance(val, FitBounds)][0]
out = self.map._parent.render()
fit_bounds_tpl = self.env.get_template('fit_bounds.js')
fit_bounds_rendered = fit_bounds_tpl.render({
'bounds': json.dumps(bounds),
'fit_bounds_options': json.dumps({'maxZoom': 15,
'padding': (3, 3), },
sort_keys=True),
'this': fitbounds,
})
assert ''.join(fit_bounds_rendered.split()) in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
def test_image_overlay(self):
"""Test image overlay."""
# from numpy.random import random
from folium.utilities import write_png
# import base64
data = [[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 0, 0.5], [0, 0, 1, 1], [0, 0, 1, 1]]]
min_lat, max_lat, min_lon, max_lon = -90.0, 90.0, -180.0, 180.0
self.setup()
image_url = 'data.png'
self.map.image_overlay(data, filename=image_url)
out = self.map._parent.render()
imageoverlay = [val for key, val in self.map._children.items() if
isinstance(val, ImageOverlay)][0]
png_str = write_png(data)
# with open('data.png', 'wb') as f:
# f.write(png_str)
png = "data:image/png;base64,{}".format
inline_image_url = png(base64.b64encode(png_str).decode('utf-8'))
image_tpl = self.env.get_template('image_layer.js')
image_name = 'Image_Overlay'
image_opacity = 0.25
image_bounds = [[min_lat, min_lon], [max_lat, max_lon]]
image_rendered = image_tpl.render({'image_name': image_name,
'this': imageoverlay,
'image_url': image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity})
assert ''.join(image_rendered.split()) in ''.join(out.split())
self.setup()
self.map.image_overlay(data, mercator_project=True)
out = self.map._parent.render()
imageoverlay = [val for key, val in self.map._children.items() if
isinstance(val, ImageOverlay)][0]
image_rendered = image_tpl.render({'image_name': image_name,
'this': imageoverlay,
'image_url': inline_image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity})
assert ''.join(image_rendered.split()) in ''.join(out.split())
bounds = self.map.get_bounds()
assert bounds == [[-90.0, -180.0], [90.0, 180.0]], bounds
def test_custom_icon(self):
"""Test CustomIcon."""
self.setup()
icon_image = "http://leafletjs.com/docs/images/leaf-green.png"
shadow_image = "http://leafletjs.com/docs/images/leaf-shadow.png"
self.map = folium.Map([45, -100], zoom_start=4)
i = folium.features.CustomIcon(icon_image,
icon_size=(38, 95),
icon_anchor=(22, 94),
shadow_image=shadow_image,
shadow_size=(50, 64),
shadow_anchor=(4, 62),
popup_anchor=(-3, -76),)
mk = folium.map.Marker([45, -100], icon=i,
popup=folium.map.Popup('Hello'))
self.map.add_children(mk)
self.map._parent.render()
bounds = self.map.get_bounds()
assert bounds == [[45, -100], [45, -100]], bounds
def test_tile_layer(self):
mapa = folium.Map([48., 5.], tiles='stamentoner', zoom_start=6)
layer = 'http://otile1.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.png'
mapa.add_children(folium.map.TileLayer(layer, name='MapQuest',
attr='attribution'))
mapa.add_children(folium.map.TileLayer(layer,
name='MapQuest2',
attr='attribution2',
overlay=True))
mapa.add_children(folium.map.LayerControl())
mapa._repr_html_()
bounds = self.map.get_bounds()
assert bounds == [[None, None], [None, None]], bounds
|
BibMartin/folium
|
tests/test_folium.py
|
Python
|
mit
| 30,299
|
import sys
import socket
import time
import os
import getpass
def handshake(address):
print ("Attempting handshake...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create socket
port = 5010 #specify port
s.connect((address,port)) #connect
s.sendall('110') #send data
reply = s.recv(512) #save reply.
if reply == '111':
print("Connection made")
return s
elif reply != '111':
print("Invalid handshake reply.")
s.close() #close socket
sys.exit()
def send_files(files, socket, repname):
socket.sendall(str(len(files))) #send number of files
print ("files to send: "),str(files)
for x in range(0, len(files)):
if os.path.exists(files[x]): #make sure the file actually exists
if repname == 1:
dat = os.path.abspath(files[x]) #send the paths for each file
print ("Sending '"),dat,("'...")
dat = dat.replace(getpass.getuser(), "%%%%") #if replace name set then replace username with %
else:
dat = os.path.abspath(files[x]) #send the paths for each file
print ("Sending "),dat
socket.send(dat)
time.sleep(0.5)
f = open(files[x], 'rb') #open file for read
data = f.read() #read data in
size = len(data) #get size of data
print("Sending "),size,(" bytes...")
socket.send(str(size)) #send size of file
time.sleep(0.5)
socket.send(data) #send data
time.sleep(0.5)
msg = socket.recv(512)
if msg == '1':
print ("Sending of file "),x+1 ,("Was succsessful")
time.sleep(0.5)
dat = ""
else:
socket.send("null") #If file doesnt exist, tell the server.
time.sleep(0.5)
print("File '"), files[x], ("' does not exist. Ignoring it.")
return
def send(files, address):
repname = 1
socket = handshake(address) #call handshake process.
send_files(files, socket, repname)
sys.exit()
|
Samathy/dotsync
|
src/send.py
|
Python
|
mit
| 2,165
|
"""
Simple User model creation tests.
"""
import csv
import datetime
import json
import tempfile
from django.core.management import call_command
from django.core.urlresolvers import reverse_lazy
from django.test import TransactionTestCase
from mixer.backend.django import mixer
from .base import BaseAPITestCase
from .testdata import fileobj_video
from contentcuration.models import DEFAULT_CONTENT_DEFAULTS
from contentcuration.models import Invitation
from contentcuration.models import User
from contentcuration.utils.csv_writer import _format_size
from contentcuration.utils.csv_writer import write_user_csv
from contentcuration.views.users import send_invitation_email
class UserPoliciesCreationTestCase(TransactionTestCase):
def setUp(self):
call_command("loadconstants")
def create_user(self):
return User.objects.create(
email="mrtest@testy.com",
first_name="Mr.",
last_name="Test",
is_admin=False,
is_staff=False,
date_joined=datetime.datetime.now(),
policies=None,
)
def test_user_with_empty_policy_is_created_successfully(self):
"""
This test should not raise any error when creating a user
with no policy.
"""
assert self.create_user()
def test_content_defaults_is_dict(self):
mrtest = self.create_user()
mrtest.save()
assert mrtest.content_defaults == DEFAULT_CONTENT_DEFAULTS
mrtest2 = User.objects.get(email="mrtest@testy.com")
assert mrtest2.content_defaults == DEFAULT_CONTENT_DEFAULTS
class UserInvitationTestCase(BaseAPITestCase):
def test_user_invitation_dedupe(self):
self.channel.editors.add(self.user)
data = json.dumps({"user_email": "test@testing.com",
"channel_id": self.channel.pk,
"share_mode": "edit",
})
request = self.create_post_request(reverse_lazy("send_invitation_email"), data=data, content_type='application/json')
response = send_invitation_email(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.filter(email__iexact="test@testing.com").count(), 1)
data = json.dumps({"user_email": "TeSt@TeStIng.com",
"channel_id": self.channel.pk,
"share_mode": "edit",
})
request = self.create_post_request(reverse_lazy("send_invitation_email"), data=data, content_type='application/json')
response = send_invitation_email(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.filter(email__iexact="test@testing.com").count(), 1)
def test_editors_can_access_invitations(self):
"""
This checks that editors for a channel can still access invitations for the same channel
even if they weren't the ones who sent them
"""
guestuser = User.objects.create(email="guest@test.com")
testuser = User.objects.create(email="testuser@test.com")
testviewonlyuser = User.objects.create(email="testviewonlyuser@test.com")
invitation = mixer.blend(Invitation, channel=self.channel, sender=self.user, invited=guestuser)
self.channel.editors.add(testuser)
self.channel.viewers.add(testviewonlyuser)
# Editors should have access
self.client.force_authenticate(testuser)
response = self.get('/api/invitation/{}'.format(invitation.pk))
self.assertEqual(response.status_code, 200)
# Viewers should have access
self.client.force_authenticate(testviewonlyuser)
response = self.get('/api/invitation/{}'.format(invitation.pk))
self.assertEqual(response.status_code, 200)
class UserAccountTestCase(BaseAPITestCase):
def test_user_csv_export(self):
videos = [fileobj_video() for i in range(10)]
for video in videos:
video.uploaded_by = self.user
video.save()
with tempfile.NamedTemporaryFile(suffix=".csv") as tempf:
write_user_csv(self.user, path=tempf.name)
with open(tempf.name, 'rb') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
for index, row in enumerate(reader):
if index == 0:
self.assertEqual(row, ['Channel', 'Title', 'Kind', 'Filename', 'File Size',
'URL', 'Description', 'Author', 'Language',
'License', 'License Description', 'Copyright Holder'])
else:
self.assertIn(videos[index-1].original_filename, row)
self.assertIn(_format_size(videos[index-1].file_size), row)
self.assertEqual(index, len(videos))
|
fle-internal/content-curation
|
contentcuration/contentcuration/tests/test_user.py
|
Python
|
mit
| 4,941
|
__author__ = 'metjush'
# Implementation of Classification Tree Bagging
# ============================================
# This Bag of Trees is built on the Classification Tree object implemented in classtree.py
#
# It uses bootstrap aggregating to grow the forest
#
# The primary parameters to input are the number of trees to grow,
# the number of examples to select for each iteration, and the maximum depth of each forest
import numpy as np
from ClassTree import ClassificationTree
from scipy import stats
import warnings
class TreeBagger:
def __init__(self, n_trees=50, depth_limit=None, sample_fraction=0.75, impurity="gini"):
self.n_trees = n_trees
self.depth_limit = depth_limit if isinstance(depth_limit, (int, float, np.int64, np.float64)) else np.inf
self.fraction = sample_fraction
self.trees = [0]*n_trees
self.trained = False
self.impurity = impurity
def __untrain(self):
self.trained = False
self.trees = [0]*self.n_trees
print("Retraining")
#__numpify() takes a regular python list and turns it into a numpy array
def __numpify(self, array):
numpied = np.array(array)
if numpied.dtype in ['int64', 'float64']:
return numpied
else:
return False
# train() trains the Bagged Forest with input numpy arrays X and y
def train(self, X, y):
#check dimensions
if not len(X) == len(y):
raise IndexError("The number of samples in X and y do not match")
#check if X and y are numpy arrays
if type(X) is not np.ndarray:
X = self.__numpify(X)
if X is False: # __numpify returns False for non-numeric input; truth-testing an array raises
raise TypeError("input dataset X is not a valid numeric array")
if type(y) is not np.ndarray:
y = self.__numpify(y)
if y is False:
raise TypeError("input label vector y is not a valid numeric array")
#check if trained
if self.trained:
self.__untrain()
indices = np.arange(len(X))
#determine the size of the bootstrap sample
strapsize = np.int(len(X)*self.fraction)
for t in xrange(self.n_trees):
#create a new classification tree
tree = ClassificationTree(depth_limit=self.depth_limit, impurity=self.impurity)
#bootstrap a sample
bootstrap = np.random.choice(indices, strapsize)
Xstrap = X[bootstrap,:]
ystrap = y[bootstrap]
#train the t-th tree with the strapped sample
tree.train(Xstrap,ystrap)
self.trees[t] = tree
self.trained = True
print("%d trees grown" % self.n_trees)
# predict() uses a trained Bagged Forest to predict labels for a supplied numpy array X
# returns a one-dimensional vector of predictions, which is selected by a plurality
# vote from all the bagged trees
def predict(self, X):
if not self.trained:
raise RuntimeError("The bagged forest classifier hasn't been trained yet")
#get predictions from each tree
#combine predictions into one matrix
#get the mode of predictions for each sample
prediction_matrix = np.zeros((len(X), self.n_trees))
for t in xrange(self.n_trees):
pred = self.trees[t].predict(X)
prediction_matrix[:,t] = pred
final_vote = stats.mode(prediction_matrix, axis=1)[0]
return final_vote.flatten()
# evaluate() is built on top of predict() to also score the generated prediction
# the methods are the same as with the individual tree
# the default method is the F1 score
# alternatives are classification accuracy and Matthews correlation coefficient
def evaluate(self, X, y, method = 'f1'):
yhat = self.predict(X)
accurate = y == yhat
positive = np.sum(y == 1)
hatpositive = np.sum(yhat == 1)
tp = np.sum(yhat[accurate] == 1)
#F1 score
if method == 'f1':
recall = 1.*tp/positive if positive > 0 else 0.
precision = 1.*tp/hatpositive if hatpositive > 0 else 0.
f1 = (2.*precision*recall)/(precision+recall) if (precision+recall) > 0 else 0.
return f1
#simple accuracy measure
elif method == 'acc':
return (1.*np.sum(accurate))/len(yhat)
#matthews correlation coefficient
elif method == 'matthews':
tn = np.sum(yhat[accurate] == 0)
fp = np.sum(yhat[np.invert(accurate)] == 1)
fn = np.sum(yhat[np.invert(accurate)] == 0)
denominator = np.sqrt( (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn) )
mat = 1.*((tp*tn)-(fp*fn)) / denominator if denominator > 0 else 0.
return mat
else:
warnings.warn("Wrong evaluation method specified, defaulting to F1 score", RuntimeWarning)
return self.evaluate(X,y)
# cross_val() implements cross validation for training Bagged Forests
# for each fold (default = 1), it splits the input dataset X,y by the
# split parameter (default = 0.3), trains the Bag on the training split
# and evaluates it on the cross-val split, using the provided method
# (defaults to F1)
def cross_val(self, X, y, split = 0.3, method = 'f1', folds = 1):
indices = np.arange(len(X))
set_ind = set(indices)
size = np.int(len(X)*(1-split))
scores = np.zeros(folds)
for f in xrange(folds):
train = np.random.choice(indices, size, replace=False)
set_train = set(train)
set_test = list(set_ind.difference(set_train))
Xtrain = X[train, :]
ytrain = y[train]
Xtest = X[set_test, :]
ytest = y[set_test]
self.train(Xtrain,ytrain)
scores[f] = self.evaluate(Xtest, ytest, method)
print(scores[f])
return scores
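# A minimal usage sketch (hypothetical data; assumes ClassTree.py is importable
# and that labels are binary 0/1, as the evaluate() metrics expect):
#
# bag = TreeBagger(n_trees=10, depth_limit=5, sample_fraction=0.75)
# X = np.random.rand(100, 4)
# y = (X[:, 0] + X[:, 1] > 1).astype(int)
# bag.train(X, y)
# print(bag.evaluate(X, y, method='f1'))
# print(bag.cross_val(X, y, split=0.3, folds=3))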
|
metjush/decision_tree
|
decision_tree/ClassTreeBagging.py
|
Python
|
mit
| 5,931
|
class TaxBracket(object):
"""
creates a tax bracket object
verifies the bracket integrity
allows for passing calculations to the bracket
"""
def __init__(self, bracket: list, deduction: float) -> None:
self.__validate_bracket(bracket)
self.deduction = deduction
self.bracket = bracket
def estimate_net_income(self, gross_income: float) -> float:
adj_gross_income = float(gross_income) - float(self.deduction)
total_burden = TaxBracket.__calculate_total_tax_burden(self.bracket, adj_gross_income)
net_income = adj_gross_income - total_burden
return round(net_income, 2)
def __validate_bracket(self, bracket: list) -> None:
self.__verify_bracket_length(bracket)
self.__verify_bracket_set_types(bracket)
self.__verify_set_lengths(bracket)
@staticmethod
def __verify_bracket_length(bracket: list) -> None:
# verify length of the bracket is at least 1
if len(bracket) < 1:
raise ValueError('Bracket has no values')
@staticmethod
def __verify_set_lengths(bracket: list) -> None:
# verify bracket fields between 1 and 3 entries long
for set in bracket:
set_length = len(set)
if set_length < 1 or set_length > 3:
set_string = ' '.join(str(x) for x in set)
raise ValueError('Set length Failure in %s' % set_string)
@staticmethod
def __verify_bracket_set_types(bracket: list) -> None:
# verify every entry in each set is numeric
for set in bracket:
for s in set:
try:
float(s)
except (TypeError, ValueError):
raise TypeError('Set Type Failure')
@staticmethod
def __calculate_tax_burden(set: list, adj_gross_income: float) -> float:
adj_gross_income_deduction = 0
set_length = len(set)
percentile_f = float(set[0]) / 100
adj_gross_income_f = float(adj_gross_income)
# applies the adj_gross_income cap to the total amount
if set_length == 3:
cap = float(set[2])
if adj_gross_income_f > cap:
adj_gross_income_f = cap
# subtracts the minimum if larger number drops it to zero
if set_length >= 2:
set_min = float(set[1])
if adj_gross_income_f > set_min:
adj_gross_income_f = adj_gross_income_f - set_min
else:
adj_gross_income_f = 0.00
# Applies the percentage
if set_length >= 1:
adj_gross_income_deduction = adj_gross_income_f * percentile_f
return round(adj_gross_income_deduction, 2)
@staticmethod
def __calculate_total_tax_burden(bracket: list, adj_gross_income: float) -> float:
burden = 0.00
for set in bracket:
set_burden = TaxBracket.__calculate_tax_burden(set=set,adj_gross_income=adj_gross_income)
burden = burden + set_burden
return burden
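# A minimal usage sketch (the bracket numbers below are made up for
# illustration, not real tax law). Each set is [percent, optional_minimum,
# optional_cap], matching __calculate_tax_burden above:
#
# demo_bracket = [[10, 0, 9875], [12, 9875, 40125], [22, 40125]]
# tb = TaxBracket(demo_bracket, deduction=12400.00)
# tb.estimate_net_income(50000.00) # -> 33285.5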
|
cwiki/monthly_budget
|
monthly_budget/models/tax_bracket.py
|
Python
|
mit
| 3,060
|
# coding=utf-8
"""
Summation of primes
Problem 10
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
from primes import *
print reduce(lambda x, y: x + y, primesList(2000000))
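# primesList() comes from the local primes module, which is not shown here; a
# sieve-of-Eratosthenes sketch with the assumed signature could look like:
#
# def primesList(n):
#     sieve = [True] * n
#     sieve[0:2] = [False, False]
#     for i in xrange(2, int(n ** 0.5) + 1):
#         if sieve[i]:
#             sieve[i * i::i] = [False] * len(sieve[i * i::i])
#     return [i for i, is_prime in enumerate(sieve) if is_prime]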
|
Ladeia/ProjectEuler
|
Problem010/Python/solution_1.py
|
Python
|
mit
| 236
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Meas.Control import Control
class Command(Control):
"""A Command is a discrete control used for supervisory control.A Command is a discrete control used for supervisory control.
"""
def __init__(self, value=0, normalValue=0, Discrete=None, ValueAliasSet=None, *args, **kw_args):
"""Initialises a new 'Command' instance.
@param value: The value representing the actuator output
@param normalValue: Normal value for Control.value e.g. used for percentage scaling
@param Discrete: The Measurement variable used for control.
@param ValueAliasSet: The Commands using the set for translation.
"""
#: The value representing the actuator output
self.value = value
#: Normal value for Control.value e.g. used for percentage scaling
self.normalValue = normalValue
self._Discrete = None
self.Discrete = Discrete
self._ValueAliasSet = None
self.ValueAliasSet = ValueAliasSet
super(Command, self).__init__(*args, **kw_args)
_attrs = ["value", "normalValue"]
_attr_types = {"value": int, "normalValue": int}
_defaults = {"value": 0, "normalValue": 0}
_enums = {}
_refs = ["Discrete", "ValueAliasSet"]
_many_refs = []
def getDiscrete(self):
"""The Measurement variable used for control.
"""
return self._Discrete
def setDiscrete(self, value):
if self._Discrete is not None:
self._Discrete._Command = None
self._Discrete = value
if self._Discrete is not None:
self._Discrete.Command = None
self._Discrete._Command = self
Discrete = property(getDiscrete, setDiscrete)
def getValueAliasSet(self):
"""The Commands using the set for translation.
"""
return self._ValueAliasSet
def setValueAliasSet(self, value):
if self._ValueAliasSet is not None:
filtered = [x for x in self.ValueAliasSet.Commands if x != self]
self._ValueAliasSet._Commands = filtered
self._ValueAliasSet = value
if self._ValueAliasSet is not None:
if self not in self._ValueAliasSet._Commands:
self._ValueAliasSet._Commands.append(self)
ValueAliasSet = property(getValueAliasSet, setValueAliasSet)
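# A minimal usage sketch of the bidirectional reference handling (assumes a
# Discrete instance from CIM15.IEC61970.Meas.Discrete; its Command property is
# implied by the setter above):
#
# from CIM15.IEC61970.Meas.Discrete import Discrete
# disc = Discrete()
# cmd = Command(value=1, normalValue=0, Discrete=disc)
# disc.Command is cmd # -> True: setDiscrete wires the back-reference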
|
rwl/PyCIM
|
CIM15/IEC61970/Meas/Command.py
|
Python
|
mit
| 3,451
|
#!/usr/bin/env python
'''
crawler.py
Adam Canady
Jeff Ondich, CS257
October 4, 2013
This project is a single-threaded web crawler that returns a number of basic useful functionalities
directed towards webmasters or website content managers.
Usage: python crawler.py [--linklimit int] [--searchprefix string] [--action brokenlinks | outgoing links | summary] url
Note: an option given as a searchprefix will be tested as being inside a URL string, not just a prefix. This
functionality allows the crawler to work on multiple subdomains if just a root domain is given (e.g. carleton.edu).
Additionally, all URLs will be processed without a trailing "/" to allow for greater flexibility in intra-site
navigation conventions.
This crawler pauses for 1/10th of a second between requests to avoid imposing denial of service attacks on webservers.
Some code in this project was derived from Jeff Ondich's web-precrawler.py. Additionally, the
breadth-first-search algorithm was derived from the following Stack Overflow article:
http://stackoverflow.com/questions/16755546/python-breadth-first-search-capable-of-returning-largest-distance
'''
import argparse
import re
import urllib2
import urlparse
import Queue
import time
class Crawl():
def __init__(self, arguments):
self.arguments = arguments
self.process_arguments(self.arguments.linklimit, self.arguments.startingURL, self.arguments.searchprefix)
self.main()
def process_arguments(self, link_limit, startingURL, search_prefix):
## Take care of arguments ##
if not link_limit: self.link_limit = 1000
elif "infinity" in link_limit: self.link_limit = float('inf')
else: self.link_limit = int(link_limit[0])
self.startingURL = urllib2.urlopen(startingURL).geturl() # Reconcile initial redirects
self.search_prefix = self.startingURL if not search_prefix else search_prefix
self.urls_to_crawl = Queue.Queue()
self.urls_to_crawl.put(self.startingURL)
self.queued_for_crawling = set()
self.queued_for_crawling.add(self.startingURL)
self.links_reverse = {} # links_reverse[link_to] = [link_from]
self.crawled_nodes = set()
self.crawled_path = {} # crawled_nodes[url] = (path_to_url)
self.broken_links = set() # "url_from, url_to"
def print_broken_links(self):
print "Broken Links:"
for link in self.broken_links:
for backlink in self.links_reverse[link]:
print backlink + ", " + link
def print_outgoing_links(self):
print "Outgoing Links:"
for link in self.links_reverse:
if self.search_prefix not in link:
print link
def print_summary(self):
# Process Graph (derived from Stack Overflow post linked above)
q = Queue.Queue()
q.put((self.startingURL,))
visited = set()
visited.add(self.startingURL)
while not q.empty():
path = q.get()
last_node = path[-1]
for node in self.links_reverse.get(last_node,[]):
if node not in visited:
new_path = path + (node,)
q.put(new_path)
visited.add(node)
# Find longest path
longest_path_len = 0
longest_path = ()
for key in self.crawled_path:
path = self.crawled_path[key]
if len(path) > longest_path_len:
longest_path = path
longest_path_len = len(path)
# FilesFound
print "FilesFound:", len(self.crawled_nodes)
# LongestPathDepth
print "LongestPathDepth:", longest_path_len
# LongestPath
print "LongestPath:"
for node in longest_path:
print node
# CantGetHome
print "CantGetHome:"
for link in self.crawled_nodes - visited - self.broken_links:
print link
def do_crawl(self):
while not self.urls_to_crawl.empty() and len(self.crawled_nodes) < self.link_limit:
cur_url = self.urls_to_crawl.get()
cur_page = Page(cur_url)
self.crawled_nodes.add(cur_url)
if cur_page.response_code == 404:
self.broken_links.add(cur_url)
for link in cur_page.links:
full_url = urlparse.urljoin(cur_url, link) # Deal with relational links
if self.search_prefix in full_url and full_url not in self.queued_for_crawling:
self.urls_to_crawl.put(full_url)
self.queued_for_crawling.add(full_url)
# Generate Path
if self.crawled_path.get(cur_url):
self.crawled_path[full_url] = self.crawled_path.get(cur_url) + (full_url,)
else:
self.crawled_path[full_url] = (self.startingURL, full_url)
# Links Reverse
if full_url in self.links_reverse: self.links_reverse[full_url].append(cur_url)
else: self.links_reverse[full_url] = [cur_url]
time.sleep(0.1)
def process_crawl(self):
# Process the crawl
if self.arguments.action and "brokenlinks" in self.arguments.action:
self.print_broken_links()
if self.arguments.action and "outgoinglinks" in self.arguments.action:
self.print_outgoing_links()
if self.arguments.action and "summary" in self.arguments.action:
self.print_summary()
def main(self):
self.do_crawl()
self.process_crawl()
class Page():
def __init__(self, url):
self.url = url
self.get_page()
self.get_links()
# Derived from Jeff's Code
def get_links(self):
href_pattern = re.compile(r'<a .*?href="(.*?)"')
links = []
for href_value in re.findall(href_pattern, self.content):
links.append(href_value)
self.links = links
# Derived from Jeff's Code
def get_page(self):
# Get the text of the requested page.
try:
response = urllib2.urlopen(self.url, timeout=5)
self.content = response.read()
self.response_code = response.code
response.close()
except Exception, e:
self.content = ''
self.response_code = 404
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='Produce a report on the web page specified on the command line.')
arg_parser.add_argument('startingURL', help='URL to start the crawler at.')
arg_parser.add_argument('--linklimit', action='append')
arg_parser.add_argument('--searchprefix', action='append')
arg_parser.add_argument('--action', action='append')
arguments = arg_parser.parse_args()
Crawl(arguments)
|
AdamCanady/CS257-Crawler
|
crawler.py
|
Python
|
mit
| 6,934
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-26 02:48
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('home', '0014_auto_20161026_0217'),
]
operations = [
migrations.AddField(
model_name='vertice',
name='concept',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='home.Concept'),
preserve_default=False,
),
migrations.AlterField(
model_name='attempt',
name='date',
field=models.DateTimeField(default=datetime.datetime(2016, 10, 26, 2, 46, 57, 201033, tzinfo=utc)),
),
]
|
maxwallasaurus/arboretum
|
home/migrations/0015_auto_20161026_0248.py
|
Python
|
mit
| 845
|
# Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from json import dumps
from requests import Request
try:
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import urljoin
except ImportError:
from urllib import quote
from urllib import urlencode
from urlparse import urljoin
from uber_rides.utils.handlers import error_handler
from uber_rides.utils import http
def generate_data(method, args):
"""Assign arguments to body or URL of an HTTP request.
Parameters
method (str)
HTTP Method. (e.g. 'POST')
args (dict)
Dictionary of data to attach to each Request.
e.g. {'latitude': 37.561, 'longitude': -122.742}
Returns
(str or dict)
Either params containing the dictionary of arguments
or data containing arguments in a JSON-formatted string.
"""
data = {}
params = {}
if method in http.BODY_METHODS:
data = dumps(args)
else:
params = args
return data, params
def generate_prepared_request(method, url, headers, data, params, handlers):
"""Add handlers and prepare a Request.
Parameters
method (str)
HTTP Method. (e.g. 'POST')
headers (dict)
Headers to send.
data (JSON-formatted str)
Body to attach to the request.
params (dict)
Dictionary of URL parameters to append to the URL.
handlers (list)
List of callback hooks, for error handling.
Returns
(requests.PreparedRequest)
The fully mutable PreparedRequest object,
containing the exact bytes to send to the server.
"""
request = Request(
method=method,
url=url,
headers=headers,
data=data,
params=params,
)
handlers.append(error_handler)
for handler in handlers:
request.register_hook('response', handler)
return request.prepare()
def build_url(host, path, params=None):
"""Build a URL.
This method encodes the parameters and adds them
to the end of the base URL, then adds scheme and hostname.
Parameters
host (str)
Base URL of the Uber Server that handles API calls.
path (str)
Target path to add to the host (e.g. 'v1.2/products').
params (dict)
Optional dictionary of parameters to add to the URL.
Returns
(str)
The fully formed URL.
"""
path = quote(path)
params = params or {}
if params:
path = '/{}?{}'.format(path, urlencode(params))
else:
path = '/{}'.format(path)
if not host.startswith(http.URL_SCHEME):
host = '{}{}'.format(http.URL_SCHEME, host)
return urljoin(host, path)
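# A small usage sketch for the helpers above (host, path, and coordinates are
# illustrative, and assume http.URL_SCHEME is 'https://'):
#
# data, params = generate_data('GET', {'latitude': 37.775, 'longitude': -122.417})
# build_url('api.uber.com', 'v1.2/products', params)
# # -> something like
# # 'https://api.uber.com/v1.2/products?latitude=37.775&longitude=-122.417'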
|
uber/rides-python-sdk
|
uber_rides/utils/request.py
|
Python
|
mit
| 4,020
|
from unittest.mock import patch, PropertyMock
from bs4 import BeautifulSoup
from django.utils import translation
from django.core.urlresolvers import reverse
from core.tests.helpers import create_response
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
@patch('core.views.LandingPageCMSView.page', new_callable=PropertyMock)
def test_landing_page_context(
mock_get_landing_page, mock_get_component, settings, client, breadcrumbs
):
page = {
'title': 'the page',
'industries': [{'title': 'good 1'}],
'meta': {'languages': ['en-gb']},
'breadcrumbs': breadcrumbs,
}
mock_get_landing_page.return_value = page
mock_get_component.return_value = create_response(
status_code=200,
json_payload={
'banner_label': 'EU Exit updates',
'banner_content': '<p>Lorem ipsum.</p>',
'meta': {'languages': [('ar', 'العربيّة')]},
}
)
response = client.get(reverse('index'))
assert response.status_code == 200
assert response.context_data['page'] == page
@patch('directory_cms_client.client.cms_api_client.lookup_by_slug')
def test_landing_page_not_found(
mock_get_landing_page, settings, client
):
mock_get_landing_page.return_value = create_response(
status_code=404
)
response = client.get(reverse('index'))
assert response.status_code == 404
@patch('core.views.LandingPageCMSView.cms_component',
new_callable=PropertyMock)
@patch('core.views.LandingPageCMSView.page', new_callable=PropertyMock)
def test_landing_page_cms_component(
mock_get_page, mock_get_component, client, settings
):
settings.FEATURE_FLAGS['EU_EXIT_BANNER_ON'] = True
mock_get_page.return_value = {
'title': 'the page',
'sectors': [],
'guides': [],
'meta': {'languages': [('en-gb', 'English')]},
}
mock_get_component.return_value = {
'banner_label': 'EU Exit updates',
'banner_content': '<p>Lorem ipsum.</p>',
'meta': {'languages': [('en-gb', 'English')]},
}
url = reverse('index')
response = client.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
assert soup.select('.banner-container')[0].get('dir') == 'ltr'
assert response.template_name == ['core/landing-page.html']
assert 'EU Exit updates' in str(response.content)
assert '<p class="body-text">Lorem ipsum.</p>' in str(response.content)
@patch('core.views.LandingPageCMSView.cms_component',
new_callable=PropertyMock)
@patch('core.views.LandingPageCMSView.page', new_callable=PropertyMock)
def test_landing_page_cms_component_bidi(
mock_get_page, mock_get_component, client, settings
):
settings.FEATURE_FLAGS['EU_EXIT_BANNER_ON'] = True
mock_get_page.return_value = {
'title': 'the page',
'sectors': [],
'guides': [],
'meta': {'languages': [('ar', 'العربيّة')]},
}
mock_get_component.return_value = {
'banner_label': 'EU Exit updates',
'banner_content': '<p>Lorem ipsum.</p>',
'meta': {'languages': [('ar', 'العربيّة')]},
}
translation.activate('ar')
response = client.get('/trade/?lang=ar')
soup = BeautifulSoup(response.content, 'html.parser')
assert soup.select('.banner-container')[0].get('dir') == 'rtl'
|
uktrade/directory-ui-supplier
|
core/tests/test_views.py
|
Python
|
mit
| 3,369
|
"""
WSGI config for gestionale project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gestionale.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
DavideTonin99/gestionaleFabLab
|
gestionale/wsgi.py
|
Python
|
mit
| 489
|
import os
from keras import backend as K
import tensorflow as tf
from tensorflow.python.tools.freeze_graph import freeze_graph
def exportModelToTF(tfModelOutputDir):
if not os.path.exists(tfModelOutputDir):
os.makedirs(tfModelOutputDir)
# Save checkpoint
saver = tf.train.Saver()
save_path = saver.save(K.get_session(), tfModelOutputDir + "/model")
# Save metagraph
tf.train.write_graph(K.get_session().graph.as_graph_def(), "", tfModelOutputDir + "/metagraph.pb", False)
# Freeze graph
freeze_graph(input_graph=tfModelOutputDir + "/metagraph.pb", input_saver="", input_binary=True,
input_checkpoint=tfModelOutputDir + "/model", output_node_names='softmax/Softmax',
restore_op_name="save/restore_all", filename_tensor_name="save/Const:0",
output_graph=tfModelOutputDir + "/graph.pb", clear_devices=True, initializer_nodes="")
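# A minimal usage sketch (hypothetical model; the hard-coded output node name
# 'softmax/Softmax' above assumes the final layer is named 'softmax'):
#
# from keras.models import Sequential
# from keras.layers import Dense
# model = Sequential()
# model.add(Dense(2, activation='softmax', input_shape=(10,), name='softmax'))
# exportModelToTF('./exported_model')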
|
SlipknotTN/Dogs-Vs-Cats-Playground
|
deep_learning/keras/lib/tfutils/export.py
|
Python
|
mit
| 924
|
import unittest
from unittest import mock
# noinspection PyUnresolvedReferences
from accrocchio.badgeofshame import accrocchio, detonator, epoxy, compromise, blinder, flypaper
from accrocchio.badgeofshame import this_is_a, this_is_an
from accrocchio.observers import AccrocchioObserver
class TestBadgeOfShame(unittest.TestCase):
def setUp(self):
accrocchio.reset()
assert accrocchio.how_many() == 0
def test(self):
# noinspection PyUnusedLocal
@accrocchio
def accrocchio_fun(a, b):
pass
# noinspection PyUnusedLocal
@detonator
def detonator_fun(a, b):
pass
# noinspection PyUnusedLocal
@epoxy
def epoxy_fun(a, b):
pass
self.assertEqual(0, accrocchio.how_many())
[accrocchio_fun(1, 2) for _ in range(3)]
self.assertEqual(3, accrocchio.how_many())
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
[accrocchio_fun(1, 2) for _ in range(3)]
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
self.assertEqual(0, detonator.how_many())
self.assertEqual(0, epoxy.how_many())
[detonator_fun(1, 2) for _ in range(3)]
[epoxy_fun(1, 2) for _ in range(4)]
self.assertEqual(7, accrocchio.how_many())
self.assertEqual(3, detonator.how_many())
self.assertEqual(4, epoxy.how_many())
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
self.assertEqual(0, detonator.how_many()) # We expect it to have detonators being reset as well
self.assertEqual(0, epoxy.how_many())
[detonator_fun(1, 2) for _ in range(3)]
[epoxy_fun(1, 2) for _ in range(4)]
epoxy.reset()
self.assertEqual(7, accrocchio.how_many())
self.assertEqual(3, detonator.how_many())
self.assertEqual(0, epoxy.how_many())
def test_observers(self):
# noinspection PyUnusedLocal
@accrocchio
def accrocchio_fun(a, b):
pass
# noinspection PyUnusedLocal
@detonator
def detonator_fun(a, b):
pass
# noinspection PyUnusedLocal
@flypaper
def flypaper_fun(a, b):
pass
accrocchio_observer = mock.create_autospec(AccrocchioObserver)
accrocchio.add_observer(accrocchio_observer)
detonator_observer = mock.create_autospec(AccrocchioObserver)
detonator.add_observer(detonator_observer)
accrocchio_fun(1, 2)
self.assertEqual(0, accrocchio_observer.reset.call_count)
self.assertEqual(0, detonator_observer.on_accrocchio.call_count)
self.assertEqual(1, accrocchio_observer.on_accrocchio.call_count)
detonator_fun(1, 2)
self.assertEqual(1, detonator_observer.on_accrocchio.call_count)
self.assertEqual(0, accrocchio_observer.reset.call_count)
self.assertEqual(2, accrocchio_observer.on_accrocchio.call_count)
accrocchio_fun(1, 2)
self.assertEqual(0, accrocchio_observer.reset.call_count)
self.assertEqual(3, accrocchio_observer.on_accrocchio.call_count)
accrocchio.reset()
self.assertEqual(1, accrocchio_observer.reset.call_count)
self.assertEqual(1, detonator_observer.reset.call_count)
detonator.reset()
self.assertEqual(1, accrocchio_observer.reset.call_count)
self.assertEqual(2, detonator_observer.reset.call_count)
# noinspection PyUnusedLocal
def test_metaclass(self):
class AccrocchioClass(metaclass=accrocchio):
pass
class CompromiseClass(metaclass=compromise):
pass
class BlinderClass(metaclass=blinder):
pass
self.assertEqual(3, accrocchio.how_many())
self.assertEqual(1, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
AccrocchioClass()
self.assertEqual(4, accrocchio.how_many())
self.assertEqual(1, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
CompromiseClass()
self.assertEqual(5, accrocchio.how_many())
self.assertEqual(2, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
# noinspection PyUnusedLocal
def test_class_decorator(self):
@accrocchio
class AccrocchioClass:
pass
@compromise
class CompromiseClass:
def a_method(self):
pass
@blinder
class BlinderClass:
pass
self.assertEqual(3, accrocchio.how_many())
self.assertEqual(1, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
AccrocchioClass()
self.assertEqual(4, accrocchio.how_many())
self.assertEqual(1, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
c = CompromiseClass()
self.assertEqual(5, accrocchio.how_many())
self.assertEqual(2, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
c.a_method()
self.assertEqual(5, accrocchio.how_many())
self.assertEqual(2, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
CompromiseClass()
self.assertEqual(6, accrocchio.how_many())
self.assertEqual(3, compromise.how_many())
self.assertEqual(1, blinder.how_many())
self.assertEqual(0, epoxy.how_many())
def test_one_shot_accrocchi(self):
self.assertEqual(0, accrocchio.how_many())
[this_is_an(accrocchio) for _ in range(3)]
self.assertEqual(3, accrocchio.how_many())
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
[this_is_an(accrocchio) for _ in range(3)]
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
self.assertEqual(0, detonator.how_many())
self.assertEqual(0, epoxy.how_many())
[this_is_a(detonator) for _ in range(3)]
[this_is_an(epoxy) for _ in range(4)]
self.assertEqual(7, accrocchio.how_many())
self.assertEqual(3, detonator.how_many())
self.assertEqual(4, epoxy.how_many())
accrocchio.reset()
self.assertEqual(0, accrocchio.how_many())
self.assertEqual(0, detonator.how_many()) # We expect it to have detonators being reset as well
self.assertEqual(0, epoxy.how_many())
[this_is_a(detonator) for _ in range(3)]
[this_is_an(epoxy) for _ in range(4)]
epoxy.reset()
self.assertEqual(7, accrocchio.how_many())
self.assertEqual(3, detonator.how_many())
self.assertEqual(0, epoxy.how_many())
def test_context(self):
with accrocchio:
pass
with detonator:
with detonator:
pass
self.assertEqual(3, accrocchio.how_many())
self.assertEqual(2, detonator.how_many())
def test_typing(self):
def f(a: detonator[int]):
pass
self.assertEqual(1, accrocchio.how_many())
self.assertEqual(1, detonator.how_many())
f(1)
self.assertEqual(1, accrocchio.how_many())
self.assertEqual(1, detonator.how_many())
|
fcracker79/accrocchio
|
test/accrocchio/test_badgeofshame.py
|
Python
|
mit
| 7,581
|
from distutils.version import StrictVersion
import json
import django
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
class UserSetting(models.Model):
TYPE_STRING = "string"
TYPE_NUMBER = "number"
TYPE_BOOL = "bool"
TYPE_JSON = "json"
TYPE_CHOICES = (
(TYPE_STRING, _("string")),
(TYPE_NUMBER, _("number")),
(TYPE_BOOL, _("bool")),
(TYPE_JSON, _("json")),
)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
field_name = models.CharField(max_length=32)
label = models.CharField(max_length=128, blank=True, default='')
field_type = models.CharField(
max_length=16,
choices=TYPE_CHOICES,
default=TYPE_STRING,
)
value = models.CharField(max_length=getattr(settings,
'USERSETTING_VALUE_MAXLEN',
4096))
def __str__(self):
return "'%s': '%s' for user %s" % (
self.field_name,
self.value,
self.user,
)
def __unicode__(self):
return u"'%s': '%s' for user %s" % (
self.field_name,
self.value,
self.user,
)
class SettingGateWay(object):
def __init__(self, user):
self._user = user
def __getattr__(self, k):
if k.startswith('_'):
value = object.__getattribute__(self, k)
else:
try:
asObject = UserSetting.objects.get(
user=self._user,
field_name=k,
)
except UserSetting.DoesNotExist:
value = None
else:
try:
value = json.loads(asObject.value)
except (TypeError, ValueError):
value = asObject.value
return value
def __setattr__(self, k, v):
if not k.startswith("_"):
try:
asObject, created = UserSetting.objects.get_or_create(
user=self._user,
field_name=k,
)
if isinstance(v, (list, tuple, dict)):
asObject.field_type = UserSetting.TYPE_JSON
elif isinstance(v, bool):
# bool is checked before int because bool is a subclass of int
asObject.field_type = UserSetting.TYPE_BOOL
elif isinstance(v, int):
asObject.field_type = UserSetting.TYPE_NUMBER
else:
asObject.field_type = UserSetting.TYPE_STRING
asObject.value = json.dumps(v)
if created:
asObject.label = k
asObject.save()
except:
raise
else:
object.__setattr__(self, k, v)
def __str__(self):
return "%s" % list(UserSetting.objects.filter(user=self._user))
def __unicode__(self):
return u"%s" % list(UserSetting.objects.filter(user=self._user))
class UserSettingDescriptor(object):
def __get__(self, instance, owner):
return SettingGateWay(instance)
if StrictVersion(django.get_version()) < StrictVersion('1.7.0'):
from django.contrib.auth import get_user_model
if hasattr(settings, 'DDU_SETTING_ATTRIBUTE_NAME'):
setting_attribute_name = settings.DDU_SETTING_ATTRIBUTE_NAME
else:
setting_attribute_name = "settings"
setattr(
get_user_model(),
setting_attribute_name,
UserSettingDescriptor(),
)
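# A minimal usage sketch (hypothetical user instance; requires a configured
# Django project and the descriptor attached above, named "settings" unless
# DDU_SETTING_ATTRIBUTE_NAME says otherwise):
#
# user = get_user_model().objects.first()
# user.settings.page_size = 25 # creates/updates a UserSetting row
# user.settings.page_size # -> 25 (value round-trips through json)
# user.settings.unknown_key # -> None (no matching row)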
|
miraculixx/django-dynamic-usersettings
|
django_dynamic_usersettings/models.py
|
Python
|
mit
| 3,541
|
import requests
class SubScraper:
def __init__(self, subreddit, num):
self.url = "https://www.reddit.com/r/{}.json".format(subreddit)
self.no_of_stories = num
self.main_stories = []
self.links = []
self.data = []
self.fetch_data()
def fetch_data(self):
del self.main_stories[:]
del self.links[:]
del self.data[:]
sauce = requests.get(self.url,
headers={'user-agent': 'Chrome'})
raw_data = sauce.json()["data"]["children"]
for data in raw_data[:self.no_of_stories]:
self.main_stories.append(data["data"]["title"])
self.links.append(data["data"]["url"])
self.data.extend(zip(self.main_stories, self.links))
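# A minimal usage sketch (subreddit name and story count are illustrative):
#
# scraper = SubScraper('python', 5)
# for title, link in scraper.data:
#     print("{} -> {}".format(title, link))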
|
rhys-fernandes/JAMM
|
subreddit_scraper.py
|
Python
|
mit
| 779
|
# -*- coding:utf-8 -*-
import tornado.escape
from torcms.core import tools
from torcms.model.abc_model import MHelper
from torcms.model.core_tab import TabWikiHist
class MWikiHist():
@staticmethod
def get_last(postid):
'''
Get the last wiki in history.
'''
recs = TabWikiHist.select().where(
TabWikiHist.wiki_id == postid).order_by(
TabWikiHist.time_update.desc())
return None if recs.count() == 0 else recs.get()
@staticmethod
def delete(uid):
'''
Delete by uid
'''
return MHelper.delete(TabWikiHist, uid)
@staticmethod
def get_by_uid(uid):
return MHelper.get_by_uid(TabWikiHist, uid)
@staticmethod
def update_cnt(uid, post_data):
entry = TabWikiHist.update(
user_name=post_data['user_name'],
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),
time_update=tools.timestamp(),
).where(TabWikiHist.uid == uid)
entry.execute()
@staticmethod
def query_by_wikiid(postid, limit=5):
recs = TabWikiHist.select().where(
TabWikiHist.wiki_id == postid).order_by(
TabWikiHist.time_update.desc()).limit(limit)
return recs
@staticmethod
def create_wiki_history(raw_data, user_info):
entry = TabWikiHist.create(
uid=tools.get_uuid(),
title=raw_data.title,
wiki_id=raw_data.uid,
user_name=user_info.user_name,
cnt_md=raw_data.cnt_md,
time_update=tools.timestamp()
)
return entry.uid
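# A minimal usage sketch (hypothetical wiki record and user objects; requires an
# initialized peewee database behind TabWikiHist):
#
# hist_uid = MWikiHist.create_wiki_history(wiki_rec, current_user)
# MWikiHist.update_cnt(hist_uid, {'user_name': 'editor', 'cnt_md': '# new text'})
# latest = MWikiHist.get_last(wiki_rec.uid)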
|
bukun/TorCMS
|
torcms/model/wiki_hist_model.py
|
Python
|
mit
| 1,639
|
import collections
from supriya.enums import CalculationRate
from supriya.ugens.PureUGen import PureUGen
class SinOscFB(PureUGen):
"""
::
>>> sin_osc_fb = supriya.ugens.SinOscFB.ar(
... feedback=0,
... frequency=440,
... )
>>> sin_osc_fb
SinOscFB.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict([
('frequency', 440),
('feedback', 0),
])
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
feedback=0,
frequency=440,
):
PureUGen.__init__(
self,
calculation_rate=calculation_rate,
feedback=feedback,
frequency=frequency,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
feedback=0,
frequency=440,
):
"""
Constructs an audio-rate SinOscFB.
::
>>> sin_osc_fb = supriya.ugens.SinOscFB.ar(
... feedback=0,
... frequency=440,
... )
>>> sin_osc_fb
SinOscFB.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
feedback=feedback,
frequency=frequency,
)
return ugen
@classmethod
def kr(
cls,
feedback=0,
frequency=440,
):
"""
Constructs a control-rate SinOscFB.
::
>>> sin_osc_fb = supriya.ugens.SinOscFB.kr(
... feedback=0,
... frequency=440,
... )
>>> sin_osc_fb
SinOscFB.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
feedback=feedback,
frequency=frequency,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def feedback(self):
"""
Gets `feedback` input of SinOscFB.
::
>>> sin_osc_fb = supriya.ugens.SinOscFB.ar(
... feedback=0,
... frequency=440,
... )
>>> sin_osc_fb.feedback
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('feedback')
return self._inputs[index]
@property
def frequency(self):
"""
Gets `frequency` input of SinOscFB.
::
>>> sin_osc_fb = supriya.ugens.SinOscFB.ar(
... feedback=0,
... frequency=440,
... )
>>> sin_osc_fb.frequency
440.0
Returns ugen input.
"""
index = self._ordered_input_names.index('frequency')
return self._inputs[index]
|
Pulgama/supriya
|
etc/pending_ugens/SinOscFB.py
|
Python
|
mit
| 3,110
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2019 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Check the order of the entries of the glossary files
This program requires Python>=3.6 for the f-string feature.
"""
from pathlib import Path
import re
import sys
def check_section_sort_order(file_path, sect_lines):
"""Check the sort order of the lines of a section"""
# Find out lines of sub_sections and analyze them
result = True
filtered_lines = []
subsection_lines = []
for raw_line in sect_lines:
line = raw_line.rstrip()
if line != raw_line:
print(f"{file_path}: spaces at the end of {repr(raw_line)}")
result = False
if line == '':
if subsection_lines:
# Add empty lines to subsection
subsection_lines.append(line)
# Anyway, add them to the lines in the section too
filtered_lines.append(line)
elif line.startswith(' '):
# Add indented lines to the subsection, without the indent
subsection_lines.append(line[2:])
else:
            # a non-indented line means the subsections end there
if subsection_lines:
if not check_section_sort_order(file_path, subsection_lines):
result = False
subsection_lines = []
filtered_lines.append(line)
    # Check the sort order of any subsection left at the end
if subsection_lines:
if not check_section_sort_order(file_path, subsection_lines):
result = False
del subsection_lines
if not filtered_lines:
return result
    # If any line starts with a dash, then every line must start with a dash
if any(line.startswith('- ') for line in filtered_lines):
if not all(not line or line.startswith('- ') for line in filtered_lines):
print(f"{file_path}: a section with dash needs to have all with dash: {repr(filtered_lines)}")
result = False
return result # Return directly, here
# Check the sort order of lines starting with a star
last_sortword = None
last_sortword_orig = None
for line in filtered_lines:
if not line:
continue
if not line.startswith('*'):
# Reset the sort order when a text appears
if not re.match(r'^[0-9a-zA-Z]', line):
print(f"{file_path}: unexpected non-list line: {repr(line)}")
result = False
last_sortword = None
last_sortword_orig = None
continue
if len(line) < 3 or line[1] != ' ':
print(f"{file_path}: missing space between */- and words in {repr(line)}")
result = False
continue
# Ignore lists of URLs
if line.startswith('* https://'):
if last_sortword is not None:
print(f"{file_path}: URL while looking for words: {repr(line)}")
result = False
continue
# Find the equal sign
try:
eq_idx = line.index('=', 3)
except ValueError:
print(f"{file_path}: missing = in {repr(line)}")
result = False
continue
# Keep an "original" unmondified version of the word, in order to display it
new_word_orig = new_word = line[2:eq_idx].strip()
new_word = new_word.upper()
new_word = new_word.replace('/', '')
new_word = new_word.replace('-', '')
new_word = new_word.replace('²', '2')
if last_sortword is not None and last_sortword > new_word:
print(f"{file_path}: disorder {last_sortword} > {new_word} " +
f"({last_sortword_orig} needs to come after {new_word_orig})")
result = False
last_sortword = new_word
last_sortword_orig = new_word_orig
return result
def check_file_sort_order(file_path):
"""Check the sort order of a file"""
result = True
current_lines = []
title_line = None
with file_path.open('r', encoding='utf8') as stream:
for line in stream:
if not line.endswith('\n'):
print(f"{file_path}: no \\n at the end of {repr(line)}")
result = False
else:
line = line[:-1]
if line and not re.match(r'[-0-9a-zA-Z=)/:.~]', line[-1]):
print(f"{file_path}: unexpected last character in {repr(line)}")
result = False
# Detect section headers
if len(line) >= 3 and all(c == line[0] for c in line):
try:
title_line = current_lines.pop()
except IndexError:
print(f"{file_path}: unexpected title line {repr(line)}")
result = False
else:
if len(title_line) != len(line):
print(f"{file_path}: the length of the title bar does not match {repr(title_line)}")
result = False
if current_lines:
# Pop the previous empty line
if current_lines[-1] != '':
print(f"{file_path}: unexpected non-empty line before {repr(title_line)}")
result = False
else:
while current_lines and current_lines[-1] == '':
current_lines.pop()
# Analyze the section
if current_lines:
if current_lines[0] == '':
current_lines = current_lines[1:]
if not check_section_sort_order(file_path, current_lines):
result = False
current_lines = []
continue
# Otherwise, stash line into the current lines buffer
current_lines.append(line)
# The first line of a section is empty
if len(current_lines) == 1 and line and title_line is not None:
print(f"{file_path}: unexpected non-empty line first line in {repr(title_line)}")
result = False
return result
def check_sort_order_of_all():
"""Check the sort order of all glossaries"""
result = True
base_dir = Path(__file__).parent
for file_path in base_dir.glob('**/*.rst'):
if file_path.name != 'README.rst':
if not check_file_sort_order(file_path):
result = False
return result
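# Illustrative example (not part of the original script) of a glossary section
# this checker accepts:
#
#   Title
#   =====
#
#   * AES = Advanced Encryption Standard
#   * RSA = Rivest-Shamir-Adleman
#
# Entries start with "* ", contain an "=" sign, and must be sorted by the
# upper-cased keyword with '/' and '-' removed and '²' mapped to '2'.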
if __name__ == '__main__':
sys.exit(0 if check_sort_order_of_all() else 1)
|
fishilico/shared
|
glossaries/check_sort_order.py
|
Python
|
mit
| 7,722
|
#!/usr/bin/env python
from pygecko.Messages import Twist, Image, IMU, Joystick, Vector, Array, Dictionary
from pygecko.Messages import Pose, Compass, Range, Quaternion
from pygecko.Messages import Buttons, Axes
from pygecko.Messages import serialize, deserialize
# from pygecko.lib.ZmqClass import Pub, Sub
import numpy as np
from nose.tools import raises
def test_twist():
t = Twist()
t.linear.set(.1, .2, .3)
t.angular.set(-1, -2., -3)
m = serialize(t)
m = deserialize(m)
assert m.linear == t.linear
assert m.angular == t.angular
assert isinstance(t, Twist)
assert isinstance(m, Twist)
assert isinstance(t.linear, Vector)
assert isinstance(t.angular, Vector)
assert isinstance(m.linear, Vector)
assert isinstance(m.angular, Vector)
assert t.stamp == m.stamp
assert t.Class == m.Class
def test_image():
im = Image()
im.img = np.random.randint(0, 255, size=(5, 5))
msg = serialize(im)
i = deserialize(msg)
im.decodeB64() # serialize destroys original image
assert isinstance(im, Image)
assert isinstance(i, Image)
assert isinstance(im.img, np.ndarray), type(im.img)
assert isinstance(i.img, np.ndarray), type(i.img)
assert im.depth == i.depth
assert i.img.all() == im.img.all()
assert i.Class == im.Class
assert i.stamp == im.stamp
def test_vector():
v = Vector()
v.x = -0.00001
v.y = 2.123456789
v.z = -0.0123456789
m = serialize(v)
m = deserialize(m)
assert v.x == m.x
assert v.y == m.y
assert v.z == m.z
assert type(v) == type(m)
assert v.Class == m.Class
def test_range():
v = Range()
v.range = [1, 2, 3]
v.range = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
v.fov = 20.0
m = serialize(v)
m = deserialize(m)
assert v.range == m.range
assert type(v) == type(m)
assert v.Class == m.Class
assert len(v.range) == len(m.range)
assert v.stamp == m.stamp
assert v.fov == m.fov
def test_quaternion():
q = Quaternion()
q.x = 100
q.y = -100
q.z = 0.12345
q.w = -0.12345
m = serialize(q)
m = deserialize(m)
assert type(q) == type(m) == type(Quaternion())
assert q.Class == m.Class
assert q == m
def test_imu():
p = IMU()
p.linear_acceleration.set(1, 2, 3)
p.angular_velocity.set(1, 2, 3)
p.orientation.set(1, 2, 3, 4)
m = serialize(p)
m = deserialize(m)
assert type(p) == type(m) == type(IMU())
assert isinstance(m.linear_acceleration, Vector)
assert isinstance(m.angular_velocity, Vector)
assert isinstance(m.orientation, Quaternion)
assert p.linear_acceleration == m.linear_acceleration
assert p.angular_velocity == m.angular_velocity
assert p.orientation == m.orientation
assert p.stamp == m.stamp
assert p.Class == m.Class
def test_pose():
p = Pose()
p.position.set(1, 2, 3)
p.orientation.set(1, 2, 3, 4)
m = serialize(p)
m = deserialize(m)
assert type(p) == type(m) == type(Pose())
assert isinstance(m.position, Vector)
assert isinstance(m.orientation, Quaternion)
assert p.position == m.position
assert p.orientation == m.orientation
assert p.stamp == m.stamp
assert p.Class == m.Class
def test_compass():
p = Compass()
p.set(1, 2, 3)
m = serialize(p)
m = deserialize(m)
assert type(p) == type(m) == type(Compass())
assert p.roll == m.roll
assert p.pitch == m.pitch
assert p.heading == m.heading
assert p.stamp == m.stamp
assert p.Class == m.Class
def test_joystick():
p = Joystick()
p.axes.set([1, 1], [2, 2], [3, 3], 4, 5)
p.buttons.set(True, False, True, False, True, False, True, False, True, False)
m = serialize(p)
m = deserialize(m)
assert type(p) == type(m) == type(Joystick())
assert isinstance(m.axes, Axes)
assert isinstance(m.buttons, Buttons)
assert p.axes.leftStick == m.axes.leftStick
assert p.axes.rightStick == m.axes.rightStick
assert p.axes.dPad == m.axes.dPad
assert p.axes.L2 == m.axes.L2
assert p.stamp == m.stamp
assert p.Class == m.Class
def test_array():
a = Array()
for i in range(4):
a.append(i)
m = serialize(a)
m = deserialize(m)
assert len(m.array) == 4
for i in range(4):
assert m[i] == a[i]
def test_dictionary():
d = Dictionary()
d.dict['bob'] = 5
d.dict['jim'] = -7
m = serialize(d)
m = deserialize(m)
assert 'bob' in m.dict
assert m['bob'] == d['bob']
for key in m.keys():
assert m[key] == d[key]
for k, v in m.items():
assert d[k] == v
@raises(Exception)
def test_msg():
v = Vector()
v.m = 5.0
|
walchko/pygecko
|
retired/old_version/original/tests/test_msg.py
|
Python
|
mit
| 4,300
|
"""
@author: Maneesh D
@email: maneeshd77@gmail.com
"""
from random import randint
from urllib import request
def download_img(url):
num = randint(1, 1000)
file_name = "ManUtd_" + str(num) + ".png"
request.urlretrieve(url, file_name)
def main():
url = "https://lh3.googleusercontent.com/-iDzlv7IG4rY/AAAAAAAAAAI/AAAAAAACsik/FnDXDKxLt5I/s0-c-k-no-ns/photo.jpg"
download_img(url)
if __name__ == '__main__':
main()
|
maneeshd/PyTutorial
|
Basics/Download_Image_Web.py
|
Python
|
mit
| 441
|
from unittest import TestCase
from nose.tools import ok_, eq_
from OrderedFormat.formatter import *
class TestOrderedFormat(TestCase):
def test_iter_depth_zero(self):
eq_(iter_depth([]), 0)
eq_(iter_depth(()), 0)
def test_iter_depth_one(self):
eq_(iter_depth([1]), 1)
eq_(iter_depth((1, )), 1)
def test_iter_depth_for_list(self):
list_array = [0, "1", ["2", [3, 4]], [5, "6"], [7, 8], [9, 10, [11, [12, 13]]]]
eq_(iter_depth(list_array), 4)
def test_iter_depth_for_dic(self):
dict_value = {"a": 1, "b": {"c": 2, "d": {"f": 3, "g": {"h": 4}}}}
eq_(iter_depth(dict_value), 4)
def test_kflatten_simple(self):
yml_data = """
human:
name: John
age: 22
"""
key_data = """
human:
- name
- name
"""
ordered_keys = load_ordered_keys(None, raw_txt=key_data, load_type="yml")
data = kflatten(yml_data, ordered_keys, load_type="yaml")
eq_(data[0], "John")
eq_(data[1], "John")
|
Himenon/OrderedFormat
|
tests/test_formatter.py
|
Python
|
mit
| 1,074
|
from .production import *
DEBUG = True
ALLOWED_HOSTS += ["coll.in", "0.0.0.0"]
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.BasicAuthentication",
"rest_framework_jwt.authentication.JSONWebTokenAuthentication",
),
"DEFAULT_RENDERER_CLASSES": (
"rest_framework.renderers.BrowsableAPIRenderer",
"rest_framework.renderers.JSONRenderer",
),
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
|
andela-cmutembei/III
|
plana/settings/develop.py
|
Python
|
mit
| 597
|
"""empty message
Revision ID: 39769d358276
Revises: 383e1224b4e7
Create Date: 2015-12-03 17:23:48.689959
"""
# revision identifiers, used by Alembic.
revision = '39769d358276'
down_revision = '383e1224b4e7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('client',
sa.Column('client_id', sa.String(length=40), nullable=False),
sa.Column('client_secret', sa.String(length=55), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('_redirect_uris', sa.Text(), nullable=True),
sa.Column('_default_scopes', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('client_id')
)
op.create_table('grant',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('client_id', sa.String(length=40), nullable=False),
sa.Column('code', sa.String(length=255), nullable=False),
sa.Column('redirect_uri', sa.String(length=255), nullable=True),
sa.Column('expires', sa.DateTime(), nullable=True),
sa.Column('_scopes', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['client_id'], ['client.client_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_grant_code'), 'grant', ['code'], unique=False)
op.create_table('token',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=40), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('token_type', sa.String(length=40), nullable=True),
sa.Column('access_token', sa.String(length=255), nullable=True),
sa.Column('refresh_token', sa.String(length=255), nullable=True),
sa.Column('expires', sa.DateTime(), nullable=True),
sa.Column('_scopes', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['client_id'], ['client.client_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('access_token'),
sa.UniqueConstraint('refresh_token')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('token')
op.drop_index(op.f('ix_grant_code'), table_name='grant')
op.drop_table('grant')
op.drop_table('client')
### end Alembic commands ###
|
taras1k/flask_auth_service
|
server/migrations/versions/39769d358276_.py
|
Python
|
mit
| 2,536
|
import threading
import time
import CrawelerMod
import IndexerMod
class myThread (threading.Thread):
def __init__(self, threadID, name, counter,Lock, CorI):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
self.Lock = Lock
self.choice = CorI
def run(self):
print( "Starting " + self.name)
if self.choice == 'C':
MyCraweler = CrawelerMod.Craweler()
MyCraweler.Crawel(self.Lock)
elif self.choice == 'I':
MyIndexer = IndexerMod.Indexer()
MyIndexer.StartIndexing(self.Lock)
elif self.choice == 'Q':
MyCraweler = CrawelerMod.Craweler()
MyCraweler.CrawelQOnly(self.Lock)
elif self.choice == 'W':
MyIndexer = IndexerMod.Indexer()
MyIndexer.StartIndexingSaver(self.Lock)
def print_time(threadName, delay, counter):
while counter:
time.sleep(delay)
print ("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
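# Usage sketch (not in the original file): run a crawler and an indexer thread
# that share one lock.
#
#   lock = threading.Lock()
#   crawler = myThread(1, "Crawler-1", 1, lock, 'C')
#   indexer = myThread(2, "Indexer-1", 2, lock, 'I')
#   crawler.start()
#   indexer.start()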
|
MohammedAlsayedOmar/APT-SearchEngine
|
SearchEngN/SearchEngN/ThreadingMod.py
|
Python
|
mit
| 1,083
|
from sys import exit
def gold_room():
print "This room is full of gold. How much do you take?"
next = raw_input("> ")
if "0" in next or "1" in next:
how_much = int(next)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print "Nice, you're not greedy, you win!"
exit(0)
else:
dead("Man, You greedy bastard!")
def bear_room():
print "There is a bear here"
print "The bear has a bunch of honey"
print "The fat bear is in front of another door"
print "How are you going to move the bear?"
bear_moved = False
while True:
next = raw_input("> ")
if next == "take honey":
dead("The bear looks at you then slaps your face off")
elif next == "taunt bear" and not bear_moved:
print "The bear has moved from the door, You can go through it now"
bear_moved = True
elif next == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif next == "open door" and bear_moved:
gold_room()
else:
print "I got no idea what that means."
def cthulhu_room():
print """Here you see the great evil cthulhu.
He, it, whatever stares at you and you go insane.
Do you flee for your life or eat your head?"""
next = raw_input("> ")
if "flee" in next:
start()
elif "head" in next:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print why, "Good job!"
exit(0)
def start():
print """You are in a dark room.
There is a door to your right and left.
Which one do you take?"""
next = raw_input("> ")
if next == "left":
bear_room()
elif next == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve")
start()
|
Hyyyyyyyyyy/acm
|
PythonTest/t35.py
|
Python
|
mit
| 1,893
|
# Copyright (C) 2013 by Ben Morris (ben@bendmorris.com)
# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox
# and Bio.Phylo.Newick, copyright 2009 by Eric Talevich.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for the NeXML file format.
See: http://www.nexml.org
"""
from Bio._py3k import StringIO
from Bio.Phylo import NeXML
from xml.dom import minidom
import sys
from ._cdao_owl import cdao_elements, cdao_namespaces, resolve_uri
# For speed try to use cElementTree rather than ElementTree
try:
if (3, 0) <= sys.version_info[:2] <= (3, 1):
# Workaround for bug in python 3.0 and 3.1,
# see http://bugs.python.org/issue9257
from xml.etree import ElementTree as ElementTree
else:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree as ElementTree
NAMESPACES = {
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xml': 'http://www.w3.org/XML/1998/namespace',
'nex': 'http://www.nexml.org/2009',
'xsd': 'http://www.w3.org/2001/XMLSchema#',
}
NAMESPACES.update(cdao_namespaces)
DEFAULT_NAMESPACE = NAMESPACES['nex']
VERSION = '0.9'
SCHEMA = 'http://www.nexml.org/2009/nexml/xsd/nexml.xsd'
try:
register_namespace = ElementTree.register_namespace
except AttributeError:
if not hasattr(ElementTree, '_namespace_map'):
# cElementTree needs the pure-Python xml.etree.ElementTree
from xml.etree import ElementTree as ET_py
ElementTree._namespace_map = ET_py._namespace_map
def register_namespace(prefix, uri):
ElementTree._namespace_map[uri] = prefix
for prefix, uri in NAMESPACES.items():
register_namespace(prefix, uri)
def qUri(s):
"""Given a prefixed URI, return the full URI."""
return resolve_uri(s, namespaces=NAMESPACES, xml_style=True)
def cdao_to_obo(s):
"""Optionally converts a CDAO-prefixed URI into an OBO-prefixed URI."""
return 'obo:%s' % cdao_elements[s[len('cdao:'):]]
def matches(s):
"""Check for matches in both CDAO and OBO namespaces."""
if s.startswith('cdao:'):
return (s, cdao_to_obo(s))
else:
return (s,)
class NeXMLError(Exception):
"""Exception raised when NeXML object construction cannot continue."""
pass
# ---------------------------------------------------------
# Public API
def parse(handle, **kwargs):
"""Iterate over the trees in a NeXML file handle.
:returns: generator of Bio.Phylo.NeXML.Tree objects.
"""
return Parser(handle).parse(**kwargs)
def write(trees, handle, plain=False, **kwargs):
"""Write a trees in NeXML format to the given file handle.
:returns: number of trees written.
"""
return Writer(trees).write(handle, plain=plain, **kwargs)
# ---------------------------------------------------------
# Input
class Parser(object):
"""Parse a NeXML tree given a file handle.
Based on the parser in `Bio.Nexus.Trees`.
"""
def __init__(self, handle):
self.handle = handle
@classmethod
def from_string(cls, treetext):
handle = StringIO(treetext)
return cls(handle)
def add_annotation(self, node_dict, meta_node):
if 'property' in meta_node.attrib:
prop = meta_node.attrib['property']
else:
prop = 'meta'
if prop in matches('cdao:has_Support_Value'):
node_dict['confidence'] = float(meta_node.text)
else:
node_dict[prop] = meta_node.text
def parse(self, values_are_confidence=False, rooted=False):
"""Parse the text stream this object was initialized with."""
nexml_doc = ElementTree.iterparse(self.handle, events=('end',))
for event, node in nexml_doc:
if node.tag == qUri('nex:tree'):
node_dict = {}
node_children = {}
root = None
child_tags = node.getchildren()
nodes = []
edges = []
for child in child_tags:
if child.tag == qUri('nex:node'):
nodes.append(child)
if child.tag == qUri('nex:edge'):
edges.append(child)
for node in nodes:
node_id = node.attrib['id']
this_node = node_dict[node_id] = {}
if 'otu' in node.attrib and node.attrib['otu']:
this_node['name'] = node.attrib['otu']
if 'root' in node.attrib and node.attrib['root'] == 'true':
root = node_id
for child in node.getchildren():
if child.tag == qUri('nex:meta'):
self.add_annotation(node_dict[node_id], child)
srcs = set()
tars = set()
for edge in edges:
src, tar = edge.attrib['source'], edge.attrib['target']
srcs.add(src)
tars.add(tar)
if src not in node_children:
node_children[src] = set()
node_children[src].add(tar)
if 'length' in edge.attrib:
node_dict[tar]['branch_length'] = float(edge.attrib['length'])
if 'property' in edge.attrib and edge.attrib['property'] in matches('cdao:has_Support_Value'):
node_dict[tar]['confidence'] = float(edge.attrib['content'])
for child in edge.getchildren():
if child.tag == qUri('nex:meta'):
self.add_annotation(node_dict[tar], child)
if root is None:
# if no root specified, start the recursive tree creation function
# with the first node that's not a child of any other nodes
rooted = False
possible_roots = (node.attrib['id'] for node in nodes
if node.attrib['id'] in srcs and
node.attrib['id'] not in tars)
root = next(possible_roots)
else:
rooted = True
yield NeXML.Tree(root=self._make_tree(root, node_dict, node_children), rooted=rooted)
@classmethod
def _make_tree(cls, node, node_dict, children):
"""Traverse the tree creating a nested clade structure.
Return a NeXML.Clade, and calls itself recursively for each child,
traversing the entire tree and creating a nested structure of NeXML.Clade
objects.
"""
this_node = node_dict[node]
clade = NeXML.Clade(**this_node)
if node in children:
clade.clades = [cls._make_tree(child, node_dict, children)
for child in children[node]]
return clade
# ---------------------------------------------------------
# Output
class Writer(object):
"""Based on the writer in Bio.Nexus.Trees (str, to_string)."""
def __init__(self, trees):
self.trees = trees
self.node_counter = 0
self.edge_counter = 0
self.tree_counter = 0
def new_label(self, obj_type):
counter = '%s_counter' % obj_type
setattr(self, counter, getattr(self, counter) + 1)
return '%s%s' % (obj_type, getattr(self, counter))
def write(self, handle, cdao_to_obo=True, **kwargs):
"""Write this instance's trees to a file handle."""
self.cdao_to_obo = cdao_to_obo
# set XML namespaces
root_node = ElementTree.Element('nex:nexml')
root_node.set('version', VERSION)
root_node.set('xmlns', DEFAULT_NAMESPACE)
root_node.set('xsi:schemaLocation', SCHEMA)
for prefix, uri in NAMESPACES.items():
root_node.set('xmlns:%s' % prefix, uri)
otus = ElementTree.SubElement(root_node, 'otus',
**{'id': 'tax', 'label': 'RootTaxaBlock'})
# create trees
trees = ElementTree.SubElement(root_node, 'trees',
**{'id': 'Trees', 'label': 'TreesBlockFromXML', 'otus': 'tax'})
count = 0
tus = set()
for tree in self.trees:
this_tree = ElementTree.SubElement(trees, 'tree',
**{'id': self.new_label('tree')})
first_clade = tree.clade
tus.update(self._write_tree(first_clade, this_tree, rooted=tree.rooted))
count += 1
# create OTUs
for tu in tus:
otu = ElementTree.SubElement(otus, 'otu', **{'id': tu})
# write XML document to file handle
# xml_doc = ElementTree.ElementTree(root_node)
# xml_doc.write(handle,
# xml_declaration=True, encoding='utf-8',
# method='xml')
# use xml.dom.minodom for pretty printing
rough_string = ElementTree.tostring(root_node, 'utf-8')
reparsed = minidom.parseString(rough_string)
try:
handle.write(reparsed.toprettyxml(indent=" "))
except TypeError:
# for compatibility with Python 3
handle.write(bytes(reparsed.toprettyxml(indent=" "), 'utf8'))
return count
def _write_tree(self, clade, tree, parent=None, rooted=False):
"""Recursively process tree, adding nodes and edges to Tree object.
Returns a set of all OTUs encountered.
"""
tus = set()
convert_uri = cdao_to_obo if self.cdao_to_obo else (lambda s: s)
node_id = self.new_label('node')
clade.node_id = node_id
attrib = {'id': node_id, 'label': node_id}
root = rooted and parent is None
if root:
attrib['root'] = 'true'
if clade.name:
tus.add(clade.name)
attrib['otu'] = clade.name
node = ElementTree.SubElement(tree, 'node', **attrib)
if parent is not None:
edge_id = self.new_label('edge')
attrib = {
'id': edge_id, 'source': parent.node_id, 'target': node_id,
'length': str(clade.branch_length),
'typeof': convert_uri('cdao:Edge'),
}
if hasattr(clade, 'confidence') and clade.confidence is not None:
attrib.update({
'property': convert_uri('cdao:has_Support_Value'),
'datatype': 'xsd:float',
'content': '%1.2f' % clade.confidence,
})
node = ElementTree.SubElement(tree, 'edge', **attrib)
if not clade.is_terminal():
for new_clade in clade.clades:
tus.update(self._write_tree(new_clade, tree, parent=clade))
del clade.node_id
return tus
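# Usage sketch (illustrative, not part of the original module): round-trip a
# NeXML file through the public parse()/write() API.
#
#   with open('example.xml') as handle:
#       trees = list(parse(handle))
#   with open('out.xml', 'w') as handle:
#       write(trees, handle)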
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/NeXMLIO.py
|
Python
|
mit
| 11,094
|
max=10000
total=0
def palindrome_test(i):
for idx, char in enumerate(str(i)):
if not char==str(i)[-(idx+1)]:
return False
return True
def lychrel_test(i):
num_iter=0
while num_iter<50:
j=i+int(str(i)[::-1])
if palindrome_test(j):
return False
else:
i=j
num_iter+=1
return True
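# Worked example: 47 + 74 = 121, a palindrome after one iteration, so 47 is
# not a Lychrel candidate; 196 never reaches a palindrome within the
# 50-iteration cap, so it is treated as one.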
print lychrel_test(196)
print lychrel_test(47)
for i in range(1, max+1):
if lychrel_test(i):
print i
total+=1
print total
|
lewisamarshall/euler
|
055/055.py
|
Python
|
mit
| 464
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from . import views
# DRF router and registered ViewSets
router = DefaultRouter()
router.register(r'groceryItems', views.GroceryItemViewSet,
base_name='groceryItems')
router.register(r'groceryGroups', views.GroceryGroupViewSet,
base_name='groceryGroups')
router.register(r'recipes', views.RecipeViewSet, base_name='recipes')
router.register(r'sources', views.SourceViewSet, base_name='sources')
router.register(r'books', views.BookViewSet, base_name='books')
router.register(r'users', views.UserViewSet, base_name='users')
router.register(r'tags', views.TagViewSet, base_name='tags')
router.register(r'auth', views.AuthViewSet, base_name='auth')
urlpatterns = [
url(r'^schema/$', get_schema_view(title='Scrape-the-Plate API')),
url(r'^api-auth/', include(
'rest_framework.urls', namespace='rest_framework')),
url(r'^scrape/$', views.scrape_view),
url(r'^', include(router.urls)),
url(r'^', views.not_found),
]
|
jf248/scrape-the-plate
|
recipes/urls.py
|
Python
|
mit
| 1,115
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from decimal import Decimal
from base import GAETestCase
from book_app.book_model import Book
from routes.books import rest
from gaegraph.model import Node
from mock import Mock
from mommygae import mommy
class IndexTests(GAETestCase):
def test_success(self):
mommy.save_one(Book)
mommy.save_one(Book)
json_response = rest.index()
context = json_response.context
self.assertEqual(2, len(context))
book_dct = context[0]
self.assertSetEqual(set(['id', 'creation', ]), set(book_dct.iterkeys()))
self.assert_can_serialize_as_json(json_response)
class NewTests(GAETestCase):
def test_success(self):
self.assertIsNone(Book.query().get())
json_response = rest.new(None, )
db_book = Book.query().get()
self.assertIsNotNone(db_book)
self.assert_can_serialize_as_json(json_response)
def test_error(self):
resp = Mock()
json_response = rest.new(resp)
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set([]), set(errors.keys()))
self.assert_can_serialize_as_json(json_response)
class EditTests(GAETestCase):
def test_success(self):
book = mommy.save_one(Book)
old_properties = book.to_dict()
json_response = rest.edit(None, book.key.id(), )
db_book = book.key.get()
self.assertNotEqual(old_properties, db_book.to_dict())
self.assert_can_serialize_as_json(json_response)
def test_error(self):
book = mommy.save_one(Book)
old_properties = book.to_dict()
resp = Mock()
json_response = rest.edit(resp, book.key.id())
errors = json_response.context
self.assertEqual(500, resp.status_code)
self.assertSetEqual(set([]), set(errors.keys()))
self.assertEqual(old_properties, book.key.get().to_dict())
self.assert_can_serialize_as_json(json_response)
class DeleteTests(GAETestCase):
def test_success(self):
book = mommy.save_one(Book)
rest.delete(None, book.key.id())
self.assertIsNone(book.key.get())
def test_non_book_deletion(self):
non_book = mommy.save_one(Node)
response = Mock()
json_response = rest.delete(response, non_book.key.id())
self.assertIsNotNone(non_book.key.get())
self.assertEqual(500, response.status_code)
self.assert_can_serialize_as_json(json_response)
|
janairacs/tekton
|
backend/test/book_tests/book_rest_tests.py
|
Python
|
mit
| 2,601
|
"""@package reconstructors
contains objects to use for reconstructing signals"""
from . import scorer, scorer_factory, sdr_scorer
|
JeroenZegers/Nabu-MSSS
|
nabu/postprocessing/scorers/__init__.py
|
Python
|
mit
| 131
|
#!/usr/bin/env python
"""
Create a client from json (not supported!)
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler.get() method
get_kwargs = {}
get_kwargs["objtype"] = u'client'
get_kwargs["status"] = u'Leader'
# get objects to use as an export to JSON file
print "...CALLING: handler.get() with args: {}".format(get_kwargs)
orig_objs = handler.get(**get_kwargs)
# export orig_objs to a json file
export_kwargs = {}
export_kwargs['obj'] = orig_objs
export_kwargs['export_format'] = 'json'
export_kwargs['report_dir'] = tempfile.gettempdir()
print "...CALLING: handler.export_to_report_file() with args: {}".format(export_kwargs)
json_file, results = handler.export_to_report_file(**export_kwargs)
# create the object from the exported JSON file
create_kwargs = {}
create_kwargs['objtype'] = u'client'
create_kwargs['json_file'] = json_file
# call the handler with the create_from_json method, passing in kwargs for arguments
print "...CALLING: handler.create_from_json() with args {}".format(create_kwargs)
try:
response = handler.create_from_json(**create_kwargs)
except Exception as e:
print "...EXCEPTION: {}".format(e)
# this should throw an exception of type: pytan.exceptions.HandlerError
# uncomment to see full exception
# traceback.print_exc(file=sys.stdout)
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get() with args: {'objtype': u'client', 'status': u'Leader'}
...CALLING: handler.export_to_report_file() with args: {'report_dir': '/var/folders/dk/vjr1r_c53yx6k6gzp2bbt_c40000gn/T', 'export_format': 'json', 'obj': <taniumpy.object_types.system_status_list.SystemStatusList object at 0x11b470dd0>}
...CALLING: handler.create_from_json() with args {'objtype': u'client', 'json_file': '/var/folders/dk/vjr1r_c53yx6k6gzp2bbt_c40000gn/T/SystemStatusList_2015_09_14-16_15_19-EDT.json'}
...EXCEPTION: client is not a json createable object! Supported objects: user, whitelisted_url, saved_question, group, package, question, action, sensor
'''
'''STDERR from running this:
'''
|
tanium/pytan
|
EXAMPLES/PYTAN_API/invalid_create_client_from_json.py
|
Python
|
mit
| 4,312
|
#!/usr/bin/env python
import sys, os, math
c = 299792458
def main():
print("SpecialRel.py has been loaded.")
def ctometer(a): # Converts the speed that is in terms of c to meters per second.
return a * c
def metertoc(a): # Converts the speed that is in terms of meters per second into terms of c.
return float(a / c)
def gamma(v): # Calculates the gamma taking the speed in terms of c.
return float((1 / math.sqrt((1 - (v**2)))))
def ctogamma(v): # Calculates the value of gamma based on the speed in terms of meters per second.
return float(gamma(metertoc(v)))
def xlorentz(speed, x, t): # Calculates the lorentz transformation for space.
return float(gamma(speed) * (x - speed * t))
def tlorentz(speed, x, t): # Calculates the lorentz transformation for time.
return float(gamma(speed) * (t - speed * x))
def vlorentz(v, w): # Calculates the lorentz transformation for velocity.
return float((v - w) / (1 - (v * w)))
def timedialation(v, t): # Calculates the time dilation based on the velocity and proper time.
return float(gamma(v) * t)
def lengthcontraction(v, h): # Calculates the length contraction based on the velocity and proper length.
return float(h / (gamma(v)))
def spacetimeinterval(x, t): # Calculates the spacetime interval based on the space and time.
    return float(math.sqrt((t**2) - (x**2)))
def spacetimeinterval2(t1, t2, x1, x2):
return float(math.sqrt((t2 - t1)**2 - (x2 - x1)**2))
def spaceinterval(x1, x2):
return float(x2 - x1)
def timeinterval(t1, t2):
return float(t2 - t1)
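# Worked example (sketch): at v = 0.8c, gamma(0.8) = 1/sqrt(1 - 0.64) ~ 1.667,
# so timedialation(0.8, 1.0) ~ 1.667 and lengthcontraction(0.8, 1.0) ~ 0.6.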
if __name__=="__main__":
main()
|
defunSM/code
|
pyth/SpecialRel.py
|
Python
|
mit
| 1,613
|
"""Filtering outliers"""
__author__ = 'thor'
import numpy as np
def outlier_lidx(data, method='median_dist', **kwargs):
if method == 'median_dist':
kwargs = dict({'thresh': 3}, **kwargs)
thresh = kwargs['thresh']
median_dist = np.abs(data - np.median(data))
mdev = np.median(median_dist)
s = median_dist/mdev if mdev else np.zeros(len(median_dist))
return s >= thresh
elif method == 'mean_dist':
kwargs = dict({'thresh': 3}, **kwargs)
thresh = kwargs['thresh']
data_std = np.std(data)
if data_std:
return abs(data - np.mean(data)) / np.std(data) >= thresh
else:
return np.array([False for i in range(len(data))])
else:
raise ValueError("method not recognized")
|
thorwhalen/ut
|
stats/filt.py
|
Python
|
mit
| 798
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
class WebHelper(dpHelper):
pass
|
why2pac/dp-tornado
|
dp_tornado/helper/web/__init__.py
|
Python
|
mit
| 120
|
import cgi
import cgitb; cgitb.enable()
import matplotlib
matplotlib.use( 'Agg' )
import numpy as np
from scipy.stats.kde import gaussian_kde
import os,sys
import pylab
from PIL import Image
import uuid
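# Usage note (inferred from the argument handling below, not documented in the
# original): the script expects ten CSV filenames located under 433files/,
# e.g. python plotModulo433.py terzinoSX.csv centraleSX.csv ... alaDX.csv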
def crop(arg1):
# size is width/height
img = Image.open(arg1)
left = 88
top = 41
width = 545
height = 321
box = (left, top, left+width, top+height)
#area = img.crop(box)
#area.save('cropped_0_388_image1', 'jpeg')
output_img = img.crop(box)
output_img.save(arg1, 'png')
def make_fig():
global folder
folder = "433files/"
global imgFolder
imgFolder = "imgGen/"
global filename
global filename1,filename2,filename3,filename4,filename5,filename6,filename7,filename8,filename9,filename10
filename1 = folder + sys.argv[1]
filename2 = folder + sys.argv[2]
filename3 = folder + sys.argv[3]
filename4 = folder + sys.argv[4]
filename5 = folder + sys.argv[5]
filename6 = folder + sys.argv[6]
filename7 = folder + sys.argv[7]
filename8 = folder + sys.argv[8]
filename9 = folder + sys.argv[9]
filename10 = folder + sys.argv[10]
    # Team
x1, y1 = np.genfromtxt(filename1, delimiter=',', unpack=True)
x2, y2 = np.genfromtxt(filename2, delimiter=',', unpack=True)
x3, y3 = np.genfromtxt(filename3, delimiter=',', unpack=True)
x4, y4 = np.genfromtxt(filename4, delimiter=',', unpack=True)
x11, y11 = np.genfromtxt(filename5, delimiter=',', unpack=True)
x22, y22 = np.genfromtxt(filename6, delimiter=',', unpack=True)
x33, y33 = np.genfromtxt(filename7, delimiter=',', unpack=True)
x111, y111 = np.genfromtxt(filename8, delimiter=',', unpack=True)
x222, y222 = np.genfromtxt(filename9, delimiter=',', unpack=True)
x333, y333 = np.genfromtxt(filename10, delimiter=',', unpack=True)
    # Defence, from left to right
# x1, y1 = np.genfromtxt('terzinoSX.csv', delimiter=',', unpack=True)
# x2, y2 = np.genfromtxt('centraleSX.csv', delimiter=',', unpack=True)
# x3, y3 = np.genfromtxt('centraleDX.csv', delimiter=',', unpack=True)
# x4, y4 = np.genfromtxt('terzinoDX.csv', delimiter=',', unpack=True)
    # Midfield, from left to right
# x11, y11 = np.genfromtxt('centrSX.csv', delimiter=',', unpack=True)
# x22, y22 = np.genfromtxt('centrCC.csv', delimiter=',', unpack=True)
# x33, y33 = np.genfromtxt('centrDX.csv', delimiter=',', unpack=True)
    # Attack, from left to right
# x111, y111 = np.genfromtxt('alaSX.csv', delimiter=',', unpack=True)
# x222, y222 = np.genfromtxt('puntaCC.csv', delimiter=',', unpack=True)
# x333, y333 = np.genfromtxt('alaDX.csv', delimiter=',', unpack=True)
y1 = y1[np.logical_not(np.isnan(y1))]
x1 = x1[np.logical_not(np.isnan(x1))]
y2 = y2[np.logical_not(np.isnan(y2))]
x2 = x2[np.logical_not(np.isnan(x2))]
y3 = y3[np.logical_not(np.isnan(y3))]
x3 = x3[np.logical_not(np.isnan(x3))]
y4 = y4[np.logical_not(np.isnan(y4))]
x4 = x4[np.logical_not(np.isnan(x4))]
y11 = y11[np.logical_not(np.isnan(y11))]
x11 = x11[np.logical_not(np.isnan(x11))]
y22 = y22[np.logical_not(np.isnan(y22))]
x22 = x22[np.logical_not(np.isnan(x22))]
y33 = y33[np.logical_not(np.isnan(y33))]
x33 = x33[np.logical_not(np.isnan(x33))]
y111 = y111[np.logical_not(np.isnan(y111))]
x111 = x111[np.logical_not(np.isnan(x111))]
y222 = y222[np.logical_not(np.isnan(y222))]
x222 = x222[np.logical_not(np.isnan(x222))]
y333 = y333[np.logical_not(np.isnan(y333))]
x333 = x333[np.logical_not(np.isnan(x333))]
xM1 = sum(x1)/len(x1)
yM1 = sum(y1)/len(y1)
xM2 = sum(x2)/len(x2)
yM2 = sum(y2)/len(y2)
xM3 = sum(x3)/len(x3)
yM3 = sum(y3)/len(y3)
xM4 = sum(x4)/len(x4)
yM4 = sum(y4)/len(y4)
xM11 = sum(x11)/len(x11)
yM11 = sum(y11)/len(y11)
xM22 = sum(x22)/len(x22)
yM22 = sum(y22)/len(y22)
xM33 = sum(x33)/len(x33)
yM33 = sum(y33)/len(y33)
xM111 = sum(x111)/len(x111)
yM111 = sum(y111)/len(y111)
xM222 = sum(x222)/len(x222)
yM222 = sum(y222)/len(y222)
xM333 = sum(x333)/len(x333)
yM333 = sum(y333)/len(y333)
fig = pylab.figure(figsize=(7,4), frameon=False)
#ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(111)
#alpha=0.5 will make the plots semitransparent
#ax1.pcolormesh(yi, xi, zi.reshape(xi.shape), alpha=0.5)
#ax2.contourf(yi, xi, zi.reshape(xi.shape), alpha=0.3)
    # Red dots marking each player's average position
ax2.plot(yM1,xM1, "ro", markersize=10)
ax2.plot(yM2,xM2, "ro", markersize=10)
ax2.plot(yM3,xM3, "ro", markersize=10)
ax2.plot(yM4,xM4, "ro", markersize=10)
ax2.plot(yM11,xM11, "ro", markersize=10)
ax2.plot(yM22,xM22, "ro", markersize=10)
ax2.plot(yM33,xM33, "ro", markersize=10)
ax2.plot(yM111,xM111, "ro", markersize=10)
ax2.plot(yM222,xM222, "ro", markersize=10)
ax2.plot(yM333,xM333, "ro", markersize=10)
pylab.axis('off')
    # Connecting lines between the players of each unit
pylab.plot([yM1,yM2,yM3,yM4], [xM1, xM2, xM3, xM4], 'b', linewidth=3)
pylab.plot([yM11,yM22,yM33], [xM11, xM22, xM33], 'b', linewidth=3)
pylab.plot([yM111,yM222,yM333], [xM111, xM222, xM333], 'b', linewidth=3)
#ax1.set_xlim(0, 740)
#ax1.set_ylim(515, 0)
ax2.set_xlim(0, 740)
ax2.set_ylim(515, 0)
#overlay your soccer field
im = pylab.imread('statszone_football_pitch.png')
#ax1.imshow(im, extent=[0, 740, 0, 515], aspect='auto')
ax2.imshow(im, extent=[0, 740, 0, 515], aspect='auto')
global unique_filename
unique_filename = str(uuid.uuid4())
#plt.show()
#plt.savefig('heatmaps_tackles.png')
# if(os.path.isfile('disposizione.png')):
# filename = 'disposizione1.png'
# else:
# filename = 'disposizione.png'
fig.savefig(imgFolder+unique_filename+".png")
print(unique_filename+".png")
make_fig()
crop(imgFolder+unique_filename+".png")
|
antoniofalcone89/webapp
|
plotModulo433.py
|
Python
|
mit
| 5,954
|
#!/usr/bin/env python
import os.path
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
sys.exit()
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
description = 'Google Spreadsheets Python API'
long_description = """
{index}
License
-------
MIT
"""
long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
read('gspread/__init__.py'), re.MULTILINE).group(1)
setup(
name='gspread',
packages=['gspread'],
description=description,
long_description=long_description,
version=version,
author='Anton Burnashev',
author_email='fuss.here@gmail.com',
url='https://github.com/burnash/gspread',
keywords=['spreadsheets', 'google-spreadsheets'],
install_requires=['requests>=2.2.1'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Software Development :: Libraries :: Python Modules"
],
license='MIT'
)
|
nateyoder/gspread
|
setup.py
|
Python
|
mit
| 1,947
|
#!/usr/bin/env python
from datetime import datetime, timedelta
import unittest
from app import create_app, db
from app.models import User, Post
from config import Config
class TestConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
ELASTICSEARCH_URL = None
class UserModelCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_hashing(self):
u = User(username='susan')
u.set_password('cat')
self.assertFalse(u.check_password('dog'))
self.assertTrue(u.check_password('cat'))
def test_avatar(self):
u = User(username='john', email='john@example.com')
self.assertEqual(u.avatar(128), ('https://www.gravatar.com/avatar/'
'd4c74594d841139328695756648b6bd6'
'?d=identicon&s=128'))
def test_follow(self):
u1 = User(username='john', email='john@example.com')
u2 = User(username='susan', email='susan@example.com')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertEqual(u1.followed.all(), [])
self.assertEqual(u1.followers.all(), [])
u1.follow(u2)
db.session.commit()
self.assertTrue(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 1)
self.assertEqual(u1.followed.first().username, 'susan')
self.assertEqual(u2.followers.count(), 1)
self.assertEqual(u2.followers.first().username, 'john')
u1.unfollow(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 0)
self.assertEqual(u2.followers.count(), 0)
def test_follow_posts(self):
# create four users
u1 = User(username='john', email='john@example.com')
u2 = User(username='susan', email='susan@example.com')
u3 = User(username='mary', email='mary@example.com')
u4 = User(username='david', email='david@example.com')
db.session.add_all([u1, u2, u3, u4])
# create four posts
now = datetime.utcnow()
p1 = Post(body="post from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post from mary", author=u3,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post from david", author=u4,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
# setup the followers
u1.follow(u2) # john follows susan
u1.follow(u4) # john follows david
u2.follow(u3) # susan follows mary
u3.follow(u4) # mary follows david
db.session.commit()
# check the followed posts of each user
f1 = u1.followed_posts().all()
f2 = u2.followed_posts().all()
f3 = u3.followed_posts().all()
f4 = u4.followed_posts().all()
self.assertEqual(f1, [p2, p4, p1])
self.assertEqual(f2, [p2, p3])
self.assertEqual(f3, [p3, p4])
self.assertEqual(f4, [p4])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
miguelgrinberg/microblog
|
tests.py
|
Python
|
mit
| 3,542
|
#!/usr/bin/env python3
import os
COV = None
if os.environ.get('ICALC_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
from app import create_app, db
from app.common.models import User, Role, Post
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('ICALC_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
server = Server(host="0.0.0.0")
manager.add_command("runserver", server)
def make_shell_context():
return dict(app=app,
db=db,
User=User,
Role=Role,
Post=Post)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def deploy():
from flask_migrate import upgrade
upgrade()
    # Create the user roles
Role.insert_roles()
    # Make all users follow themselves
User.add_self_follows()
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('ICALC_COVERAGE'):
import sys
os.environ['ICALC_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
if __name__ == '__main__':
manager.run()
|
cmacro/mogufsite
|
manage.py
|
Python
|
mit
| 1,792
|
import copy
from .base import BaseCommand
class RepeatCommand(BaseCommand):
def __init__(self, args):
super(RepeatCommand, self).__init__(args)
try:
self.count = int(args)
except (TypeError, ValueError):
raise ValueError("Invalid repeat value, should be int")
# doc doesn't specify max
if not (0 < self.count <= 10000):
raise ValueError("Invalid count value, should be > 0 and <= 10000")
def _run(self, env):
stack = env.get("STACK")
if len(stack) < 1:
raise ValueError("Repeat needs a previous instruction")
for i in xrange(self.count):
# don't modify the stack while repeating
before = copy.deepcopy(env)
stack[-1].run(before)
|
hamstah/quack
|
quack/commands/repeat.py
|
Python
|
mit
| 792
|
from model.Transport.Transport import Transport
class Bicycle(Transport):
'''
    Class to store bicycle information
'''
def __init__(self, travel_time, itinerary):
super().__init__(travel_time, itinerary, True)
|
rmulton/lawen
|
model/Transport/Bicycle.py
|
Python
|
mit
| 231
|
from django import template
register = template.Library()
# Messages
@register.inclusion_tag('widgets/messages.html', takes_context=True)
def messages(context):
django_messages = context['messages']
messages = []
for message in django_messages:
messages.append({'tags': message.tags, 'text': message})
return {
'messages': messages,
}
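# Template usage (sketch; assumes this module sits on an app's templatetags
# path, so the library loads under its module name):
#
#   {% load messages %}
#   {% messages %}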
|
bruecksen/isimip
|
isi_mip/core/templatetags/messages.py
|
Python
|
mit
| 374
|
"""
1D RootFind class.
Bruce Wernick
10 June 2021
"""
import sys
from math import sqrt, log10
from const import EPS, TINY
def MIN(a, b):
if a < b:
return a
return b
def SQR(x):
    return x * x
def SIGN(a, b):
if b >= 0.0:
return abs(a)
return -abs(a)
def signum(a, b):
'signed number'
if b < 0.0:
return -abs(a)
return abs(a)
# ---------------------------------------------------------------------
class RootFind(object):
"""Abstract 1D root finder class.
This is the base class for all root find methods.
"""
# class variables
tol = 1e-6
maxi = 128
def __init__(self, f):
'RootFind class constructor'
self.f = f
self.its = 0
self.kind = type(self).__name__
def dxdy(self, x):
"""f(x) and slope inverse dx/df
"""
e = 1e-2
xo = x
fo = self.f(xo)
h = e * abs(xo)
if h <= TINY:
h = e
x = xo + h
fx = self.f(x)
return fo, (x - xo) / (fx - fo)
def dydx(self, x):
"""f(x) and slope df/dx
"""
e = 1e-2
xo = x
fo = self.f(xo)
h = e * abs(xo)
if h <= TINY:
h = e
x = xo + h
fx = self.f(x)
return fo, (fx - fo) / (x - xo)
def dydx2(self, x):
"""f(x), df/dx and d2f/dx2 (2nd derivative)
"""
e = 1e-2
h = e * abs(x)
if h <= TINY:
h = e
fo, df = self.dydx(x)
df2 = (self.f(x+h) - 2.0 * fo + self.f(x-h)) / h / h
return fo, df, df2
def __call__(self, *args):
raise NotImplementedError('abstract root finder called!')
# ---------------------------------------------------------------------
class Newton(RootFind):
"""Newton-Raphson method (pure slope method).
Function must return f(x) and slope.
"""
def __call__(self, x):
for self.its in range(RootFind.maxi):
x0 = x
y, dydx = self.f(x)
if abs(dydx) <= TINY:
raise ValueError('curve too flat for Newton method!')
dx = y / dydx
x -= dx
if abs(y) <= RootFind.tol:
# function value is within tolerance
return x
if abs(dx) <= RootFind.tol:
# calculated change in x is small
return x
if abs(x-x0) <= RootFind.tol:
# x not changing between loops
return x
raise ValueError('max iterations reached!')
class rtSafe(RootFind):
"""Newton with safe bisection.
Based on NR2.
Has the benefit of Newton with the safety of Bisection.
"""
def __call__(self, x1, x2):
fl = self.f(x1)
fh = self.f(x2)
if fl * fh > 0:
raise ValueError('Root must be bracketed in rtsafe')
if abs(fl) <= RootFind.tol:
return x1
if abs(fh) <= RootFind.tol:
return x2
if fl < 0.0:
xl = x1
xh = x2
else:
xh = x1
xl = x2
x = 0.5 * (x1 + x2)
dx0 = abs(x2 - x1)
dx = dx0
fx, df = self.dydx(x)
for self.its in range(RootFind.maxi):
if ((((x-xh)*df-fx)*((x-xl)*df-fx) > 0.0) or (abs(2.0*fx) > abs(dx0*df))):
"bisection step"
dx0 = dx
dx = 0.5 * (xh - xl)
x = xl + dx
if xl == x:
return x
else:
"newton step"
dx0 = dx
dx = fx / df
t = x
x -= dx
if abs(t-x) <= RootFind.tol:
return x
if abs(dx) < RootFind.tol:
return x
fx, df = self.dydx(x)
if fx < 0.0:
xl = x
else:
xh = x
raise ValueError('max iterations reached!')
class Secant(RootFind):
"""Secant method.
"""
def __call__(self, a, b):
fa, fb = self.f(a), self.f(b)
if abs(fa) > abs(fb):
a, b = b, a
fa, fb = fb, fa
for self.its in range(RootFind.maxi):
dx = fa * (a - b) / (fa - fb)
if abs(dx) < RootFind.tol * (1 + abs(a)):
return a-dx
b, a = a, a-dx
fb, fa = fa, self.f(a)
raise ValueError('max iterations reached!')
class Bisect(RootFind):
"""Bisection method.
Numerical Recipes version.
"""
def __call__(self, x1, x2):
f1, f2 = self.f(x1), self.f(x2)
if f1 * f2 >= 0.0:
raise ValueError('root must be bracketed!')
if f1 < 0.0:
dx = x2 - x1
x = x1
else:
dx = x1 - x2
x = x2
for self.its in range(RootFind.maxi):
dx *= 0.5
if abs(dx) < RootFind.tol:
return x
x2 = x + dx
f2 = self.f(x2)
if abs(f2) <= EPS:
return x2
if f2 <= 0.0:
x = x2
raise ValueError('max iterations reached!')
class Ridder(RootFind):
"""Ridder's method
"""
def __call__(self, x1, x2):
fl, fh = self.f(x1), self.f(x2)
if fl * fh >= 0.0:
raise ValueError('root must be bracketed!')
xl, xh = x1, x2
x = -1.11e30
for self.its in range(RootFind.maxi):
xm = 0.5 * (xl + xh)
fm = self.f(xm)
s = sqrt(fm*fm - fl*fh)
if s == 0.0:
return xm
if fl >= fh:
xnew = xm + (xm - xl) * fm / s
else:
xnew = xm + (xl - xm) * fm / s
if (abs(xnew-x) <= RootFind.tol):
return xnew
x = xnew
fx = self.f(x)
if fx == 0.0:
return x
if SIGN(fm,fx) != fm:
xl = xm
fl = fm
xh = x
fh = fx
elif (SIGN(fl,fx) != fl):
xh, fh = x, fx
elif SIGN(fh,fx) != fh:
xl, fl = x, fx
else:
raise ValueError('undefined error!')
if abs(xh-xl) <= RootFind.tol:
return x
raise ValueError('max iterations reached!')
class Brent(RootFind):
"""Brent's inverse quadratic method.
This is supposed to be the most reliable method
(although, not always the fastest).
It is the one recommended by Numerical Recipes.
"""
def __call__(self, a, b):
fa, fb = self.f(a), self.f(b)
if fa * fb >= 0.0:
raise ValueError('root must be bracketed!')
c, fc = b, fb
for self.its in range(RootFind.maxi):
if (fb > 0.0 and fc > 0.0) or (fb < 0.0 and fc < 0.0):
c, fc = a, fa
e = d = b-a
if abs(fc) < abs(fb):
a = b; b = c; c = a
fa = fb; fb = fc; fc = fa
tol1 = 2.0*EPS*abs(b) + 0.5*RootFind.tol
xm = 0.5*(c-b)
if abs(xm) <= tol1 or fb == 0.0:
return b
if (abs(e) >= tol1 and abs(fa) > abs(fb)):
s = fb / fa
if a == c:
p = 2.0 * xm * s
q = 1.0 - s
else:
q = fa / fc
r = fb / fc
p = s * (2.0 * xm * q * (q-r) - (b-a) * (r-1.0))
q = (q-1.0) * (r-1.0) * (s-1.0)
if (p > 0.0):
q = -q
p = abs(p)
min1 = 3.0 * xm * q - abs(tol1 * q)
min2 = abs(e * q)
if (2.0 * p < MIN(min1, min2)):
e = d; d = p/q
else:
d = xm; e = d
else:
d = xm; e = d
a, fa = b, fb
if abs(d) > tol1:
b += d
else:
b += SIGN(tol1, xm)
fb = self.f(b)
raise ValueError('max iterations reached!')
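# Usage sketch (not part of the original module): find sqrt(2) as the root of
# f(x) = x*x - 2.
#
#   f = lambda x: x * x - 2.0
#   root = Brent(f)(1.0, 2.0)                              # ~1.41421356
#   # Newton expects f to return a (value, slope) pair:
#   root2 = Newton(lambda x: (x * x - 2.0, 2.0 * x))(1.5)  # ~1.41421356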
class Brent2(RootFind):
"""Brent's inverse quadratic method, by Kiusalaas,
faster than NR and Wikipedia algorithm.
"""
def __call__(self, x1, x2):
f1 = self.f(x1)
if f1 == 0: return x1
f2 = self.f(x2)
if f2 == 0: return x2
if f1 * f2 > 0:
raise ValueError('root must be bracketed!')
if x1 > x2:
x1, x2 = x2, x1
f1, f2 = f2, f1
        a, b = x1, x2  # keep a live bracket [a, b] on the root
        x3 = 0.5 * (x1 + x2)
for self.its in range(RootFind.maxi):
f3 = self.f(x3)
if abs(f3) < RootFind.tol:
return x3
if f1 * f3 < 0:
b = x3
else:
a = x3
            if (b - a) < RootFind.tol * max(abs(b), 1):
                return 0.5 * (a + b)
P = x3*(f1-f2)*(f2-f3+f1) + f2*x1*(f2-f3) + f1*x2*(f3-f1)
Q = (f2-f1)*(f3-f1)*(f2-f3)
if abs(Q) <= TINY:
dx = b-a
else:
dx = f3*P/Q
x = x3 + dx
            if (b - x) * (x - a) < 0:
                dx = 0.5 * (b - a)
x = a + dx
if x < x3:
x2, f2 = x3, f3
else:
x1, f1 = x3, f3
x3 = x
raise ValueError('max iterations reached!')
class Wernick(RootFind):
"""Brent type method using Inv Quad Int.
    I experimented with Chandrupatla and found some of the logic confusing. So,
I went back to a pure IQI with recalc of c at every step. The bracket
adjustment [a, c, s, b] seems to be the trick to fast convergence.
Simplified Logic:
calc c by Bisection.
calc s by Inv Quad (safety check failing to Secant).
adjust bracket.
"""
def __call__(self, a, b):
fa = self.f(a)
if abs(fa) <= EPS:
return a
fb = self.f(b)
if abs(fb) <= EPS:
return b
assert fa * fb <= 0
for self.its in range(RootFind.maxi):
dx = b - a # bracket delta
c = a + 0.5 * dx # bisection
if abs(dx) <= RootFind.tol:
return c
fc = self.f(c)
if abs(fc) <= RootFind.tol:
return c
if fa != fc and fb != fc:
# inv quad interp
fab, fac, fbc = fa-fb, fa-fc, fb-fc
s = a*fc*fb/fac/fab + c*fa*fb/fac/fbc - b*fa*fc/fab/fbc
else:
# secant
                s = a + dx * fa / (fa - fb)
fs = self.f(s)
if abs(fs) <= RootFind.tol:
return s
# adjust bracket [a,c,s,b]
if fc * fs < 0:
a, fa = c, fc
b, fb = s, fs
elif fa * fc < 0:
b, fb = c, fc
elif fs * fb < 0:
a, fa = s, fs
raise ValueError('max iterations reached!')
class Broyden(RootFind):
"""1D Broyden method ().
Coded from Broydens multi-dimensional method. Actually, it's a Secant
method but with slope update. The big advantage is that it only needs
a single starting guess and has one function call per loop. The slope
inverse is calculated once at the start and simply corrected at each
step. I'm surprised to not find it everywhere online because it seems
to be fairly rugged and performs well in everything I throw at it.
"""
def __call__(self, x):
fo, K = self.dxdy(x)
if abs(fo) <= RootFind.tol:
return x
for self.its in range(RootFind.maxi):
dx = -K*fo
x += dx
fx = self.f(x)
if abs(fx) <= RootFind.tol:
return x
dfx = fx - fo
if abs(dfx) <= TINY:
return x
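            # rank-one correction of the inverse slope; in 1D this reduces
            # algebraically to the secant update K = dx / dfx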
a = dx * K * dfx
dK = -K * (a - dx * dx) / a
K += dK
fo = fx
raise ValueError('max iterations reached!')
class Halley(RootFind):
"""Halley method, uses 2nd derivative.
    This is supposed to have a higher convergence rate than Newton,
    but the cost of the 2nd derivative seems to reduce its value.
"""
def __call__(self, x):
for self.its in range(RootFind.maxi):
fx, f1, f2 = self.dydx2(x)
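            # Halley step: dx = 2*f*f' / (2*f'**2 - f*f'')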
d = 2 * f1 * f1 - fx * f2
if abs(d) <= EPS:
return x
dx = (2 * fx * f1) / d
x -= dx
if abs(dx) <= RootFind.tol:
return x
raise ValueError('max iterations reached!')
class Schroeder(RootFind):
"""Schroeders method, uses 2nd derivative
"""
def __call__(self, x):
for self.its in range(RootFind.maxi):
fx, f1, f2 = self.dydx2(x)
dxn = fx / f1 # newton correction
dx = dxn * (1.0 + 0.5 * dxn * f2 / f1)
x -= dx
if abs(dx) <= RootFind.tol:
return x
raise ValueError('max iterations reached!')
class Illinois(RootFind):
"""Illionois method - modified secant
This is a good choice if Broyden doesn't work.
"""
def __call__(self, x1, x2):
f1, f2 = self.f(x1), self.f(x2)
for self.its in range(RootFind.maxi):
x3 = x2 - f2 * (x1 - x2) / (f1 - f2)
f3 = self.f(x3)
if f2 * f3 < 0: # x2 and x3 straddle root
x1, f1 = x2, f2
if abs(f2) <= RootFind.tol:
return x2
else:
f1 = 0.5 * f1 # reduce slope
x2, f2 = x3, f3
if abs(f2) <= RootFind.tol:
return x2
raise ValueError('max iterations reached!')
class Pegasus(RootFind):
"""Pegasus method - variant of Illinois
"""
def __call__(self, x1, x2):
x = 0.5 * (x1 + x2)
f1, f2 = self.f(x1), self.f(x2)
if f1 * f2 >= 0.0:
raise ValueError('root must be bracketed!')
for self.its in range(RootFind.maxi):
dx = x2 - x1
dy = f2 - f1
if abs(dy) <= EPS:
return x
x3 = x1 - f1 * dx / dy
f3 = self.f(x3)
x = x3
if abs(f3) < RootFind.tol:
return x
if f2 * f3 <= 0:
x1, f1 = x2, f2
else:
m = f2 / (f2 + f3)
f1 = m * f1
x2, f2 = x3, f3
raise ValueError('max iterations reached!')
class Anderson(RootFind):
"""Anderson's method - variant of Illinois
"""
def __call__(self, x1, x2):
x = 0.5 * (x1 + x2)
f1, f2 = self.f(x1), self.f(x2)
if f1 * f2 >= 0.0:
raise ValueError('root must be bracketed!')
for self.its in range(RootFind.maxi):
dx = x2 - x1
dy = f2 - f1
if abs(dy) <= EPS:
return x
x3 = x1 - f1 * dx / dy
f3 = self.f(x3)
x = x3
if abs(f3) < RootFind.tol:
return x
if f2 * f3 <= 0:
x1, f1 = x2, f2
else:
m = 1.0 - f3 / f2
if m <= 0:
m = 0.5
f1 = m * f1
x2, f2 = x3, f3
raise ValueError('max iterations reached!')
class RegulaFalsi(RootFind):
"""standard regula-falsi method.
Included here for completeness.
I wouldn't bother using this one.
"""
def __call__(self, a, b):
fa, fb = self.f(a), self.f(b)
if fa * fb > 0:
raise ValueError('root must be bracketed!')
k = 0
for self.its in range(RootFind.maxi):
df = fa - fb
            if abs(df) <= EPS:
raise ValueError('too flat!')
c = (fa * b - fb * a) / df
if (abs(b-a) < RootFind.tol*abs(b+a)):
return c
fc = self.f(c)
if fc * fb > 0:
b, fb = c, fc
if k == -1: fa *= 0.5
k = -1
elif fa * fc > 0:
a, fa = c, fc
if k == 1: fb *= 0.5
k = 1
else:
return c
raise ValueError('max iterations reached!')
class ModRegulaFalsi(RootFind):
"""Modified Regula-Falsi
False Position method
Better but still not great.
"""
def __call__(self, a, b):
fa, fb = self.f(a), self.f(b)
if fa * fb >= 0.0:
            raise ValueError('root must be bracketed!')
if fb < 0.0:
a, b = b, a
fa, fb = fb, fa
c = a
fc = fa
for self.its in range(RootFind.maxi):
c = (b * fa - a * fb) / (fa - fb)
fco = fc
fc = self.f(c)
if fc > 0.0:
a = c; fa = fc
if fc * fco > 0.0:
fb = 0.5 * fb
else:
b = c; fb = fc
if fc * fco > 0.0:
fa = 0.5 * fa
if abs(fc) < RootFind.tol:
return c
raise ValueError('max iterations reached!')
class Trisect(RootFind):
"""Divide range into 3 segments.
Find the range [a,c1], [c1,c2], [c2,b] where the root exists
and call it recursively.
This is just an experiment to see if I could improve on Bisection.
"""
def __init__(self, f):
super(Trisect, self).__init__(f)
        self.its = 0
def __call__(self, a, b):
if a > b:
a, b = b, a
d = (b - a) / 3
if d <= RootFind.tol:
return a + d
fa = self.f(a)
if abs(fa) < RootFind.tol:
return a
fb = self.f(b)
if abs(fb) < RootFind.tol:
return b
if fa * fb > 0:
raise ValueError("root must be bracketed")
self.its += 1
        if self.its > RootFind.maxi:
            raise ValueError('max iterations reached!')
# 1st tri-step
c1 = a + d
fc1 = self.f(c1)
if fa * fc1 < 0:
return self.__call__(a, c1)
# 2nd tri-step
c2 = b - d
fc2 = self.f(c2)
if fc1 * fc2 < 0:
return self.__call__(c1, c2)
# 3rd tri-step
return self.__call__(c2, b)
# ---------------------------------------------------------------------
if __name__ == '__main__':
def func(a,b):
def f(x):
y = (x+a)*(x+b)
dydx = a+b+2*x
return y, dydx
return f
fx = func(-2, 3)
root = Newton(fx)
y = root(7)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
fx = lambda x: (x-2)*(x+3)
root = rtSafe(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Secant(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Bisect(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Ridder(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Brent(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Brent2(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Wernick(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Broyden(fx)
y = root(7)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Halley(fx)
y = root(7)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Schroeder(fx)
y = root(7)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Illinois(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Pegasus(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Anderson(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = RegulaFalsi(fx)
y = root(15, 0.1)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = ModRegulaFalsi(fx)
y = root(3, 0.5)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
root = Trisect(fx)
y = root(3, 0.5)
print(f'{root.kind}: root={y:0.7g}, its={root.its}')
|
bru32/magz
|
magz/uroot.py
|
Python
|
mit
| 18,710
|
from django import template
from datetime import datetime
from geelweb.django.editos.models import Edito
register = template.Library()
class EditoNode(template.Node):
DEFAULT_TEMPLATE = 'editos/carousel.html'
def __init__(self, template_file=None):
if template_file:
self.template = template_file
else:
self.template = self.DEFAULT_TEMPLATE
def render(self, context):
editos = Edito.objects.filter(active=1, display_from__lte=datetime.now(),
display_until__gte=datetime.now())
t = template.loader.get_template(self.template)
return t.render({'editos': editos})
@register.tag
def editos(parser, token):
"""
    Retrieves displayable editos and renders them using the provided template
Syntax::
{% editos [template/file.html] %}
    Example usage::
{% editos %}
{% editos editos/carousel.html %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"[path/to/template.html]" %
dict(tag_name=bits[0]))
if len(bits) >= 1 and len(bits) <= 2:
if len(bits) > 1:
template_file = bits[1]
else:
template_file = None
return EditoNode(template_file=template_file)
else:
raise template.TemplateSyntaxError(syntax_message)
|
geelweb/django-editos
|
src/geelweb/django/editos/templatetags/editos.py
|
Python
|
mit
| 1,429
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyGADDAG documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 29 19:32:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyGADDAG'
copyright = '2017, Jordan Bass'
author = 'Jordan Bass'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'jorbas',
'github_repo': 'pyGADDAG',
'description': 'Pure Python GADDAG',
'fixed_sidebar': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyGADDAGdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyGADDAG.tex', 'pyGADDAG Documentation',
'Jordan Bass', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pygaddag', 'pyGADDAG Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyGADDAG', 'pyGADDAG Documentation',
author, 'pyGADDAG', 'One line description of project.',
'Miscellaneous'),
]
|
jorbas/pyGADDAG
|
docs/conf.py
|
Python
|
mit
| 5,303
|
# -*- coding: utf-8 -*-
##
import sys
import csv
import json
import re
import time
import requests
import couchdb
import configure
#
#LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}・[0-9]{1,2}")
#LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2},[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GRAFES = [u"グ", u"ラ", u"ブ", u"ル", u"海", u"なの", u"戦", u"城", u"DMM"]
class COMIC_V_MARKET(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)-.*")
LAYOUT_PARSE_2 = LAYOUT_PARSE_1
# GROUP = u"ABCDEFGHIJKLMNOPQRS"
GROUP = u"Z"
DELIMITER = ","
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}-[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
MESHI = u"めしけっ"
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)")
TOBIMONO = [u"A", u"B", u"C", u"D", u"E", u"F", u"ETC"]
#LAYOUT_PARSE_1 = re.compile(u"([^0-9]*) - [0-9]{1,2},[0-9]{1,2}")
#LAYOUT_PARSE_2 = re.compile(u"([^0-9]*) - [0-9]{1,2}")
#CIN = u"シンデレラの舞踏会"
#PRINCESS_FESTA = [u"あ", u"い", u"う", u"え", u"お", u"か", u"き", u"く", u"委託"]
class COMICMARKET(object):
LAYOUT_PARSE_1 = re.compile(u"(\(.*?\)[東|西]).*?[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
GROUP = [u"(木)東", u"(木)西", u"(金)東", u"(土)東", u"(土)西"]
DELIMITER = "\t"
class COMIC1(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}・[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = u"あいうえおかきくけこさしすせそ"
DELIMITER = "\t"
class KEY_POINT(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = [u"K", u"e", u"y", u"P"]
DELIMITER = "\t"
class TECHBOOKFEST(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
GROUP = [u"A", u"B", u"K", u"I"]
DELIMITER = ","
class PANZER(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
GROUP = u"ABCDEFGHIJKL"
DELIMITER = ","
class OMOJIN(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)-[0-9]{1,2}")
GROUP = [u"OJ", u"主催"]
DELIMITER = ","
class CREATION(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = u"ABCDEFGHI"
#GROUP = u"JKLMNOPQRS"
#GROUP = u"アイウエオカキクケ"
DELIMITER = "\t"
class COMICNEXT(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = u"ABCDEFG"
DELIMITER = "\t"
class TREASURE(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = u"アイウエオカキクケコサシスセソタチツテトナニヌネノ"
DELIMITER = "\t"
class GAMELEGEND(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = [u"GL"]
DELIMITER = "\t"
class GURUCOMI(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}")
GROUP = [u"通常", u"調理", u"企業"]
DELIMITER = ","
class LOVELIVE(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
# GROUP = u"ABCDEFGHIJK"
# GROUP = [u"音", u"ノ", u"木", u"サン", u"シャ", u"イン", u"ことり", u"梨子", u"ルビィ", u"穂乃果", u"千歌"]
GROUP = [u"僕", u"ラ", u"ブ", u"ぬ", u"ま", u"づ", u"凛"]
# GROUP = [u"ラブ", u"ライブ", u"花陽", u"凛花", u"ダイヤ"]
# GROUP = [u"国", u"立", u"音", u"ノ", u"木", u"坂", u"浦", u"星", u"サン", u"シャ", u"イン"]
# GROUP = [u"僕", u"ラ", u"ブ", u"沼", u"津"]
#GROUP = [u"や", u"ざ", u"わ", u"に", u"こ"]
DELIMITER = ","
class ONLINEGAME_UNION(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
# GROUP = [u"ドド", u"レレ", u"ミミ", u"プリ", u"アイ", u"駆逐", u"キュア"]
GROUP = [u"DMM", u"FGO", u"グラ", u"花", u"艦隊", u"城"]
DELIMITER = ","
class PUNIKET(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
# GROUP = [u"ドド", u"レレ", u"ミミ", u"プリ", u"アイ", u"駆逐", u"キュア"]
# GROUP = [u"ぷ", u"に", u"プリ", u"パラ", u"キュア", u"アイ", u"戦車", u"駆逐", u"グラ", u"FGO", u"けもの", u"なのは"]
GROUP = [u"FGO", u"アズ", u"キュア", u"けもの", u"なの", u"ぷに", u"プリ", u"駆逐", u"戦車"]
DELIMITER = ","
class LYRICALMAGICAL(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
GROUP = [u"なの"]
#GROUP = [u"な", u"の", u"は"]
DELIMITER = "\t"
class GAMEMARKET(object):
LAYOUT_PARSE_1 = re.compile(u"([^0-9]*)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"([^0-9]*)[0-9]{1,2}-[0-9]{1,2}")
GROUP = [v for v in u"ABCDEFGHIJKLMNOPQRS"] + [u"特設"]
DELIMITER = "\t"
class UTAHIME(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2},[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}")
GROUP = [
u"歌", u"姫", u"ミリ", u"シ", u"ン", u"デ", u"レ", u"ラ", u"メモ", u"幸子", u"蘭子",
u"アナ", u"唯", u"晴"
]
DELIMITER = ","
class PRDX(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}・[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}")
GROUP = [u"ブラック", u"アクア", u"マーメイド"]
DELIMITER = ","
class COMITIA(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}")
#GROUP = u"ABCDEFGHIJKLMNOPQRSTUVW"
#GROUP = u"あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆ"
GROUP = u"展"
DELIMITER = "\t"
class DENEN(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
GROUP = [u"田園"]
class HOURAIGEKISEN(object):
# LAYOUT_PARSE_1 = re.compile(u"(.*?)-[0-9]{1,2}.*")
# LAYOUT_PARSE_2 = re.compile(u"(.*?)-[0-9]{1,2}.*")
# LAYOUT_PARSE_1 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
# LAYOUT_PARSE_2 = re.compile(u"SP-No\\.外(.*?)-[0-9]{1,2}.*")
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
GROUP = [u"SP-No.", u"SP-No.外"]
DELIMITER = "\t"
class PUV(object):
LAYOUT_PARSE_1 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
GROUP = u"あいうえおか"
DELIMITER = "\t"
class SUNRISE_C(object):
LAYOUT_PARSE_1 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
GROUP = u"ABCDEFGHIJKL"
DELIMITER = "\t"
class KOBE_KANCOLLE(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}.*")
GROUP = [
u"海岸通", u"居留地", u"生田川", u"錨山", u"市章山", u"北野", u"布引", u"摩耶山", u"甲山",
u"六甲山"
]
DELIMITER = ","
class SOUGETSUSAI(object):
LAYOUT_PARSE_1 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
GROUP = u"A"
DELIMITER = "\t"
class MOUNANIMOKOWAKUNAI(object):
LAYOUT_PARSE_1 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
LAYOUT_PARSE_2 = re.compile(u"SP-No\\.(.*?)-[0-9]{1,2}.*")
GROUP = u"APU"
DELIMITER = "\t"
class SHT(object):
LAYOUT_PARSE_1 = re.compile(u"(.*?)[0-9]{1,2},[0-9]{1,2}")
LAYOUT_PARSE_2 = re.compile(u"(.*?)[0-9]{1,2}")
GROUP = [
u"円環", u"マミ", u"探偵", u"うん", u"スト", u"キュア", u"咲", u"ルカ", u"ミク", u"電磁",
u"プリ", u"みれぃ", u"戦車", u"愛里寿", u"なの", u"うさぎ", u"勇者", u"絶唱", u"アイ",
u"晴風", u"リル", u"鬼", u"祝福", u"マジ", u"卓球", u"小林", u"バンド", u"けもの", u"魔法",
u"エロ", u"AI", u"みつご", u"ヒミツ", u"SHT"
]
DELIMITER = ","
CONF = COMIC_V_MARKET
def custom_sort(d):
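    # pull the trailing booth number out of a layout string such as "A-12"
    # so entries sort numerically rather than lexically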
v = re.search("-([0-9]{1,2})", d["layout"])
return int(v.group(1))
def circle_list(dictLayout):
nGrp = 0
nIndex = 1
listGrp = []
for s in CONF.GROUP:
listGrp.append('{"id": "%d", "name": "%s"}' % (nGrp, s))
nGrp += 1
print(",\n".join(listGrp))
nGrp = 0
for k in CONF.GROUP:
#print dictLayout.keys()
try:
listItem = dictLayout[k]
except KeyError:
print("KeyError", k)
for key in dictLayout.keys():
print(key, k)
sys.exit(1)
listBuffer = []
nIndexLocal = 1
# for o in listItem:
# for o in sorted(listItem, key=lambda obj: obj["layout"]):
for o in sorted(listItem, key=custom_sort):
listBuffer.append(
" {\"layout\": \"%s\", \"sortkey\": \"%03d-%04d-%08d\",\n"
" \"circle_list\": [\n"
" %s\n"
" ]\n"
" }" %
(o["layout"], nGrp + 1, nIndexLocal, nIndex,
json.dumps(o["circle_list"], ensure_ascii=False)))
nIndexLocal += 1
nIndex += 1
exportBuffer = ""
exportBuffer += " \"%d\" : [\n" % (nGrp, )
exportBuffer += ",\n".join(listBuffer)
exportBuffer += "\n "
exportBuffer += "],"
nGrp += 1
        # output the assembled buffer for this group
print(exportBuffer)
def parse_layout(s):
oCResult = CONF.LAYOUT_PARSE_1.search(s)
if oCResult is None:
oCResult = CONF.LAYOUT_PARSE_2.search(s)
if oCResult is None:
print("parser_layout error", s)
sys.exit(-1)
return (oCResult.group(1))
def search_twitter_screen_name(db_circlecheck_uinfo, screen_name):
list_user = db_circlecheck_uinfo.view("user/screen_name",
wrapper=None,
reduce=False,
group=False,
descending=False,
include_docs=False,
start_key=[screen_name, ""],
end_key=[screen_name, "Z"])
if len(list_user) == 1:
for r in list_user:
return {
"twitter_screen_name": r.key[0].encode("utf-8"),
"twitter_user_id": r.key[1].encode("utf-8")
}
return None
def main():
# CouchDB
conn_couch = couchdb.Server("http://" + configure.COUCH_USER + ":" +
configure.COUCH_PASS + "@" +
configure.COUCH_HOST)
# CouchDB get tweets
db_circlecheck_uinfo = conn_couch["circlecheck_uinfo"]
#
dictLayout = {}
# dict_result = {
# "twitter_screen_name": screen_name,
# "twitter_user_id": search_result.group(1)
# }
with open(sys.argv[1], "r") as hFile:
oCReader = csv.reader(hFile, delimiter=CONF.DELIMITER)
for r in oCReader:
dictRecord = {"circle_list": {}}
strLayoutBlock = ""
r += ["", "", "", "", "", ""]
for idx, kwd in ((0, "layout"), (1, "circle"), (2, "writer"),
(3, "url"), (4, "twitter"), (5, "pixiv")):
s = r[idx].strip()
if s != u" " and len(s) > 0:
o_re = re.search("https?:\\/\\/twitter.com\\/(.*)", s)
if o_re is not None:
kwd = "twitter"
dict_result = None
#dict_result = search_twitter_screen_name(
# db_circlecheck_uinfo, o_re.group(1))
if dict_result is not None:
for k, v in dict_result.items():
dictRecord["circle_list"][k] = v
if re.search(
"https?:\\/\\/www.pixiv.net\\/member.php\\?.+", s):
kwd = "pixiv"
if re.search("https?:\\/\\/pixiv.me\\/(?!\\?).+", s):
kwd = "pixiv"
dictRecord["circle_list"][kwd] = s
try:
dictRecord["layout"] = dictRecord["circle_list"]["layout"]
except KeyError:
print(r, dictRecord)
sys.exit(-1)
del dictRecord["circle_list"]["layout"]
strLayoutBlock = parse_layout(dictRecord["layout"])
if strLayoutBlock not in dictLayout:
dictLayout[strLayoutBlock] = []
dictLayout[strLayoutBlock].append(dictRecord)
#for k in dictLayout.keys():
# print k
circle_list(dictLayout)
if __name__ == "__main__":
main()
# ---------------------------------------------------------------------- [EOF]
|
MizunagiKB/circlecheck
|
util/make_jsdata.py
|
Python
|
mit
| 13,913
|
class botDatabase:
def __init__(self):
self.bots = {}
self.outputs = {}
def addBot(self,id):
if not id in self.bots:
self.bots[id] = {"low": -1, "high": -1}
def addOutput(self, id):
self.outputs[id] = []
def resetBot(self,id):
self.bots[id]["low"] = -1
self.bots[id]["high"] = -1
def giveToken(self,botid, token):
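        # keep the pair ordered: the smaller token goes in "low", the
        # larger in "high" (-1 marks an empty slot)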
bot = self.bots[botid]
if bot["low"] == -1:
bot["low"] = token
elif bot["low"] > token:
bot["high"] = bot["low"]
bot["low"] = token
else:
bot["high"] = token
def transferSingle(self,botid,bottransfer, field):
bot = self.bots[botid]
self.giveToken(bottransfer, bot[field])
def transfer(self,botid, botlowid, bothighid):
bot = self.bots[botid]
if(bot["low"] == 17 and bot["high"] == 61):
print botid
self.giveToken(botlowid,bot["low"])
self.giveToken(bothighid,bot["high"])
self.resetBot(botid)
    def toOutput(self, botid, outputid, field):
self.outputs[outputid].append(self.bots[botid][field])
self.bots[botid][field] = -1
def render(self):
print "bot low high"
for bot in self.bots:
print str(bot) + ": " + str(self.bots[bot]["low"]) + " " + str(self.bots[bot]["high"])
def renderOutputs(self):
print "Output Value"
for o in self.outputs:
product = 1
for value in self.outputs[o]:
product *= value
print str(o) + " " + str(product)
def checkbot(self, botid):
bot = self.bots[botid]
if(bot["low"] == 17 and bot["high"] == 61):
print botid
if bot["low"] == -1 or bot["high"] == -1:
return False
return True
bots = botDatabase()
instructions = []
file = open("input.txt", "r")
for line in file:
cmd , args = line.split(" ", 1)
if cmd == "value":
value, x,y,z, botid = args.split(" ")
bots.addBot(int(botid))
bots.giveToken(int(botid), int(value))
if cmd == "bot":
bot, a,b,c,lowtype,botlow,e,f,g,hightype,bothigh = args.split(" ")
bots.addBot(int(bot))
if lowtype == "bot":
bots.addBot(int(botlow))
else:
bots.addOutput(int(botlow))
if hightype == "bot":
bots.addBot(int(bothigh))
else:
bots.addOutput(int(bothigh))
instructions.append((int(bot),lowtype, int(botlow), hightype, int(bothigh)))
file.close()
bots.render()
print
print "start transfering"
while(instructions):
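    # repeatedly scan for a bot already holding two tokens, run its
    # instruction, then drop it; the break restarts the scan because
    # other bots may have become full as a result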
for cmd in instructions:
bot,lowtype,botlow,hightype,bothigh = cmd
canTransfer = bots.checkbot(int(bot))
if canTransfer:
if lowtype == "output":
                bots.toOutput(int(bot), int(botlow), "low")
else:
bots.transferSingle(int(bot), int(botlow), "low")
if hightype == "output":
                bots.toOutput(int(bot), int(bothigh), "high")
else:
bots.transferSingle(int(bot), int(bothigh), "high")
instructions.remove(cmd)
break
bots.renderOutputs()
|
tbjoern/adventofcode
|
Ten/script.py
|
Python
|
mit
| 2,827
|
import unittest
from mygrations.formats.mysql.file_reader.create_parser import CreateParser
class TableDifferenceColumnsTest(unittest.TestCase):
def test_drop_columns(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL
);
"""
)
b = CreateParser()
b.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`membership_id` int(10) unsigned not null,
`task` varchar(255) DEFAULT NULL,
`subject` text
);
"""
)
# if we subtract b from a we should get some drop column queries in one alter statement
operations = b.to(a)
self.assertEquals(1, len(operations))
self.assertEquals('ALTER TABLE `tasks` DROP membership_id, DROP subject;', str(operations[0]))
def test_add_columns(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL
);
"""
)
b = CreateParser()
b.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`membership_id` int(10) unsigned not null,
`task` varchar(255) DEFAULT NULL,
`subject` text
);
"""
)
        # going from a to b should give some add column queries in one alter statement
operations = a.to(b)
self.assertEquals(1, len(operations))
self.assertEquals(
'ALTER TABLE `tasks` ADD `membership_id` INT(10) UNSIGNED NOT NULL AFTER `account_id`, ADD `subject` TEXT AFTER `task`;',
str(operations[0])
)
def test_add_remove_change_columns(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL
);
"""
)
b = CreateParser()
b.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) NOT NULL DEFAULT 0,
`membership_id` int(10) unsigned not null,
`subject` text
);
"""
)
        # adds, changes, and drops can all be requested in one alter statement
operations = a.to(b)
self.assertEquals(1, len(operations))
self.assertEquals(
'ALTER TABLE `tasks` ADD `membership_id` INT(10) UNSIGNED NOT NULL AFTER `account_id`, ADD `subject` TEXT AFTER `membership_id`, CHANGE `account_id` `account_id` INT(10) NOT NULL DEFAULT 0, DROP task;',
str(operations[0])
)
def test_split(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL
);
"""
)
b = CreateParser()
b.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) NOT NULL DEFAULT 0,
`membership_id` int(10) unsigned not null,
`subject` text,
CONSTRAINT `tasks_account_id_ref_accounts_id` FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE ON UPDATE CASCADE
);
"""
)
operations = a.to(b, True)
self.assertEquals(2, len(operations))
self.assertEquals(
'ALTER TABLE `tasks` ADD CONSTRAINT `tasks_account_id_ref_accounts_id` FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE ON UPDATE CASCADE;',
str(operations['fks'])
)
self.assertEquals(
'ALTER TABLE `tasks` ADD `membership_id` INT(10) UNSIGNED NOT NULL AFTER `account_id`, ADD `subject` TEXT AFTER `membership_id`, CHANGE `account_id` `account_id` INT(10) NOT NULL DEFAULT 0, DROP task;',
str(operations['kitchen_sink'])
)
|
cmancone/mygrations
|
mygrations/tests/integration/table_difference_columns_test.py
|
Python
|
mit
| 4,532
|
import json
import os
import tempfile
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from configdb import exceptions
from configdb.db.interface import base
from configdb.db.interface import sa_generator
class SqlAlchemyQueryCriteria(object):
pass
class SqlAlchemyQueryEquals(SqlAlchemyQueryCriteria):
def __init__(self, spec):
self.target = spec['value']
def get_filter(self, classattr):
return classattr == self.target
class SqlAlchemyQuerySubstringMatch(SqlAlchemyQueryCriteria):
def __init__(self, spec):
self.like_str = '%%%s%%' % spec['value']
def get_filter(self, classattr):
return classattr.like(self.like_str)
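# Illustration of the criteria classes above (a hedged sketch; "Host" is a
# hypothetical mapped entity, not something defined in this module):
#
#   crit = SqlAlchemyQuerySubstringMatch({'value': 'web'})
#   stmt = session.query(Host).filter(crit.get_filter(Host.name))
#
# which renders to SQL along the lines of:  name LIKE '%web%'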
class SqlAlchemyDbInterface(base.DbInterface):
"""Interface to an SQL database using SQLAlchemy."""
QUERY_TYPE_MAP = dict(base.DbInterface.QUERY_TYPE_MAP)
QUERY_TYPE_MAP.update({
'eq': SqlAlchemyQueryEquals,
'substring': SqlAlchemyQuerySubstringMatch,
})
AUDIT_SUPPORT = True
def __init__(self, uri, schema, schema_dir=None, opts={}):
self.Session = sessionmaker(autocommit=False, autoflush=False)
Base = declarative_base()
self._objs = {'Base': Base}
self._schema = schema
self._schema_dir = schema_dir # unused, meant for caching
self._load_schema()
self.engine = create_engine(uri, pool_recycle=1800, **opts)
self.Session.configure(bind=self.engine)
Base.metadata.create_all(self.engine)
def _load_schema(self):
with tempfile.NamedTemporaryFile() as schema_file:
schema_gen = sa_generator.SqlAlchemyGenerator(self._schema)
schema_file.write(schema_gen.generate())
schema_file.flush()
execfile(schema_file.name, self._objs)
def _get_class(self, entity_name):
return self._objs[entity_name.capitalize()]
def session(self):
return base.session_context_manager(self.Session())
def add_audit(self, entity_name, object_name, operation,
data, auth_ctx, session):
ins = self._objs['audit_table'].insert()
if data is not None:
data = self._schema.get_entity(entity_name).to_net(data)
session.execute(ins, {'entity': entity_name,
'object': object_name,
'op': operation,
'data': json.dumps(data) if data else None,
'user': auth_ctx.get_username()})
def get_audit(self, query, session):
audit_table = self._objs['audit_table']
sql_query = None
for key, value in query.iteritems():
qstmt = (getattr(audit_table.c, key) == value)
if sql_query is None:
sql_query = qstmt
else:
sql_query &= qstmt
return session.execute(
audit_table.select().where(sql_query).order_by('stamp desc'))
def get_by_name(self, entity_name, object_name, session):
return session.query(self._get_class(entity_name)).filter_by(
name=object_name).first()
def find(self, entity_name, query, session):
classobj = self._get_class(entity_name)
entity = self._schema.get_entity(entity_name)
sa_query = session.query(classobj)
# Assemble the SQL query. The query is split between
# SQL-compatible criteria, and postprocessed criteria (which
# will be applied by the standard _run_query method).
pp_query = {}
for field_name, q in query.iteritems():
if not isinstance(q, SqlAlchemyQueryCriteria):
pp_query[field_name] = q
else:
field = entity.fields[field_name]
if field.is_relation():
remote_cls = self._get_class(field.remote_name)
classattr = getattr(remote_cls, 'name')
sa_query = sa_query.filter(getattr(classobj, field_name).any(
q.get_filter(classattr)))
else:
classattr = getattr(classobj, field_name)
sa_query = sa_query.filter(q.get_filter(classattr))
# Apply the post-process query to the SQL results.
return self._run_query(entity, pp_query, sa_query)
def delete(self, entity_name, object_name, session):
session.delete(self.get_by_name(entity_name, object_name, session))
def create(self, entity_name, attrs, session):
obj = self._get_class(entity_name)()
entity = self._schema.get_entity(entity_name)
for k, v in attrs.iteritems():
field = entity.fields[k]
if field.is_relation():
rel_attr = getattr(obj, k)
for lv in v:
rel_obj = self.get_by_name(
field.remote_name, lv, session)
rel_attr.append(rel_obj)
else:
setattr(obj, k, v)
session.add(obj)
return obj
|
lavagetto/configdb
|
configdb/db/interface/sa_interface.py
|
Python
|
mit
| 5,149
|
from rdflib import Graph
__all__ = ['BenchableGraph']
class BenchableGraph(object):
"""
Provides a convenient way to use a graph for benchmarks.
"""
def __init__(self, store, graph_id, store_config, graph_create=False):
"""
:param str store: Type of store to use.
:param str graph_id: The graph identifier.
:param store_config: Configuration to open the store.
:type store_config: str or tuple
:param bool graph_create: True to create the graph upon connecting.
"""
self.graph = Graph(store=store, identifier=graph_id)
self._graph_id = graph_id
self._store_config = store_config
self._graph_create = graph_create
def connect(self):
"""Connect to the store.
.. note::
For some configurations, RDFlib will postpone the actual connection to
the store until needed (when doing a graph.query() or graph.add()).
            This behaviour comes from RDFlib's implementation of graph.open().
"""
return self.graph.open(configuration=self._store_config, create=self._graph_create)
def close(self, commit_pending_transaction=True):
"""Close a connection to a store.
:param bool commit_pending_transaction: True if to commit pending transaction before closing, False otherwise.
.. note::
The graph.close() method is not implemented for SPARQL Store in RDFLib
"""
self.graph.close(commit_pending_transaction=commit_pending_transaction)
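# Hedged usage sketch (the store type and endpoint URLs are illustrative
# assumptions, not taken from this repository):
#
#   bg = BenchableGraph(store='SPARQLUpdateStore',
#                       graph_id='http://localhost/bench',
#                       store_config=('http://localhost:3030/ds/query',
#                                     'http://localhost:3030/ds/update'))
#   bg.connect()
#   ...                      # run the benchmarked queries/updates on bg.graph
#   bg.close()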
|
vincent-octo/ktbs_bench_manager
|
ktbs_bench_manager/benchable_graph.py
|
Python
|
mit
| 1,555
|
from django.conf.urls import url
from feasta.views import *
from feasta.forms import PasswordChangeForm
from django.conf import settings
from django.contrib.auth.views import logout, password_change
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^$',
login_required(Home.as_view())),
url(r'^accounts/login/$',
Login.as_view()),
url(r'^mark/absent/$',
login_required(MarkAbsentView.as_view())),
url(r'^submit/mark/absent/$',
login_required(markAbsent)),
url(r'^menu/$',
login_required(MenuView.as_view())),
url(r'^accounts/edit-profile/$',
login_required(EditProfile.as_view())),
url(r'^accounts/profile/$',
login_required(MyProfile.as_view())),
url(r'^getlist/today/$',
login_required(ListForMeal.as_view())),
url(r'^accounts/signout/$',
login_required(logout),
kwargs={'next_page':settings.LOGOUT_URL },
name='logout'),
url(r'^accounts/change-password/$',
login_required(password_change),
name='passwordchange',
kwargs={'post_change_redirect':settings.LOGIN_URL,
'template_name':'passwordchange.html',
'password_change_form':PasswordChangeForm
}),
url(r'^register/summer/$',
login_required(SummerRegisterView.as_view())),
url(r'^add/guest/$',
login_required(AddGuestView.as_view())),
url(r'^pickdates/$',
login_required(PickDatesView.as_view())),
]
|
IIITS/Feasta
|
feasta/urls.py
|
Python
|
mit
| 1,438
|
"""
Simple algorithms for the 2-SUM and 3-SUM problems.
R-SUM problem:
Check if a given list of numbers contains R elements that sum to a target
value.
These algorithms can actually be used with any kind of items that support
summation, subtraction and equality tests.
Author:
Christos Nitsas
(nitsas)
(chrisnitsas)
Language:
Python 3(.4)
Date:
November, 2014
"""
import itertools
__all__ = ['two_sum', 'three_sum']
def two_sum(nums, target=0, distinct=True):
"""
Return the indices of two numbers in `nums` that sum to `target` if such
numbers exist; None otherwise.
nums -- a list of numbers
target -- a number (default 0)
distinct -- if True only return distinct indices, i.e. there must be two
entries (not necessarily with distinct values) that sum to
target - we don't accept a single entry being repeated;
allow repeats otherwise
Time complexity: O(n)
Space complexity: O(n)
"""
# insert all nj's in a dict, which will remember their index in `nums`
num_to_index = dict()
for j, nj in enumerate(nums):
num_to_index[nj] = j
# iterate through nums
for i, ni in enumerate(nums):
# if ni's complement (w.r.t. target) exists
if (target - ni) in num_to_index:
# do we want distinct entries? or do we allow repeats?
if distinct and i == num_to_index[target - ni]:
continue
# return the indices of ni and its complement
return i, num_to_index[target - ni]
# else
return None
def three_sum(nums, target=0, distinct=True):
"""
Return the indices of three numbers in `nums` that sum to `target` if such
numbers exist; None otherwise.
nums -- a list of numbers
target -- a number (default 0)
distinct -- if True only return distinct indices, i.e. there must be three
entries (not necessarily with distinct values) that sum to
target - we don't accept a single entry being repeated;
allow repeats otherwise
Time complexity: O(n**2)
Space complexity: O(n)
"""
# insert all nk's in a dict, which will remember their index in `nums`
num_to_index = dict()
for k, nk in enumerate(nums):
num_to_index[nk] = k
# iterate through pairs in `nums`
for i, ni in enumerate(nums):
# j takes values from i onwards
        for j, nj in enumerate(itertools.islice(nums, i, None), start=i):
# if (ni + nj)'s complement (w.r.t. target) exists
if (target - ni - nj) in num_to_index:
k = num_to_index[target - ni - nj]
# do we want distinct entries? or do we allow repeats?
if distinct and (i == j or i == k or j == k):
continue
return i, j, k
# else
return None
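# A small hedged usage sketch (sample numbers are illustrative, not part of
# the original module); note that both functions return indices, not values.
if __name__ == '__main__':
    nums = [3, 8, -1, 5, 7]
    print(two_sum(nums, target=7))     # -> (1, 2): nums[1] + nums[2] == 7
    print(three_sum(nums, target=10))  # -> (0, 1, 2): 3 + 8 + (-1) == 10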
|
nitsas/py3algs
|
py3algs/algorithms/two_three_sum.py
|
Python
|
mit
| 2,965
|
#!/usr/bin/env python
# coding=utf-8
"""
Run all unit tests.
"""
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import sys
import unittest
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.1"
__email__ = "alberto@albertopettarin.it"
__status__ = "Production"
TEST_DIRECTORY = "ipapy/tests"
TEST_PATTERN = "test_*.py"
TEST_PREFIX = "test_"
class NOPStream(object):
""" NOP stream """
def __init__(self, verbose=False):
self.verbose = verbose
def flush(self):
""" NOP """
pass
def write(self, msg):
""" NOP """
if self.verbose:
print(msg)
def main():
""" Perform tests """
if ("--help" in sys.argv) or ("-h" in sys.argv):
print("")
print("Usage: python %s [--sort] [--verbose]" % sys.argv[0])
print("")
sys.exit(0)
sort_tests = ("--sort" in sys.argv) or ("-s" in sys.argv)
verbose = ("--verbose" in sys.argv) or ("-v" in sys.argv)
pattern = TEST_PATTERN
prefix = TEST_PREFIX
all_files = [os.path.basename(f) for f in glob.glob(os.path.join(TEST_DIRECTORY, pattern))]
cli_files = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
selected_files = []
for cli_file in cli_files:
if not cli_file.startswith(prefix):
cli_file = prefix + cli_file
if not cli_file.endswith(".py"):
cli_file += ".py"
if cli_file in all_files:
selected_files.append(cli_file)
if len(selected_files) == 0:
selected_files = all_files
if sort_tests:
selected_files = sorted(selected_files)
verbosity = 0
if verbose:
verbosity = 2
results = {}
nop_stream = NOPStream(verbose=verbose)
for test_file in selected_files:
print("Running", test_file, "...")
testsuite = unittest.TestLoader().discover(start_dir=TEST_DIRECTORY, pattern=test_file)
result = unittest.TextTestRunner(stream=nop_stream, verbosity=verbosity).run(testsuite)
results[test_file] = {
"tests" : result.testsRun,
"errors" : len(result.errors),
"failures" : len(result.failures)
}
total_tests = sum([results[k]["tests"] for k in results])
total_errors = sum([results[k]["errors"] for k in results])
total_failures = sum([results[k]["failures"] for k in results])
print("")
print("Tests: ", total_tests)
print("Errors: ", total_errors)
print("Failures: ", total_failures)
if total_errors > 0:
print("")
print("Errors in the following tests:")
print("\n".join([key for key in results.keys() if results[key]["errors"] > 0]))
print("")
if total_failures > 0:
print("")
print("Failures in the following tests:")
print("\n".join([key for key in results.keys() if results[key]["failures"] > 0]))
print("")
print("")
if total_errors + total_failures == 0:
print("[INFO] Tests completed: all passed!")
print("")
sys.exit(0)
else:
print("[INFO] Tests completed: errors or failures found!")
print("")
sys.exit(1)
if __name__ == '__main__':
main()
|
pettarin/ipapy
|
run_all_unit_tests.py
|
Python
|
mit
| 3,339
|
"""
mote-serial-log: Mote serial logger with output coloring.
Log printf and HDLC messages from serial port to terminal and logfile.
"""
from setuptools import setup, find_packages
from os.path import join as pjoin
import mote_serial_logger
doclines = __doc__.split("\n")
setup(name='mote_serial_logger',
version=mote_serial_logger.__version__,
description='Mote serial logger with output coloring.',
long_description='\n'.join(doclines[2:]),
url='http://github.com/thinnect/serial-logger',
author='Raido Pahtma',
author_email='raido@thinnect.com',
license='MIT',
platforms=['any'],
packages=find_packages(),
install_requires=["pyserial"],
test_suite='nose.collector',
tests_require=['nose'],
scripts=[pjoin('bin', 'serial-logger'), pjoin('bin', 'tail-serial-log')],
zip_safe=False)
|
thinnect/serial-logger
|
setup.py
|
Python
|
mit
| 871
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracking', '0003_auto_20141023_0834'),
]
operations = [
migrations.AlterField(
model_name='bannedip',
name='ip_address',
field=models.GenericIPAddressField(help_text='The IP address that should be banned', verbose_name='IP Address'),
),
migrations.AlterField(
model_name='visitor',
name='ip_address',
field=models.GenericIPAddressField(),
),
]
|
letam/django-tracking
|
tracking/migrations/0004_alter_ip_address_fields.py
|
Python
|
mit
| 640
|
#!/usr/bin/env python
from optparse import OptionParser
import multiprocessing
import gunicorn.app.base
import os
import sys
import re
import shutil
import tempfile
import pyferret
from paste.request import parse_formvars
import subprocess
from jinja2 import Template
import itertools
from PIL import Image
# ==============================================================
def number_of_workers():
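    # gunicorn's commonly recommended sizing heuristic: (2 x CPU cores) + 1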
return (multiprocessing.cpu_count() * 2) + 1
# ==============================================================
def handler_app(environ, start_response):
fields = parse_formvars(environ)
if environ['REQUEST_METHOD'] == 'GET':
try:
if fields['SERVICE'] != 'WMS':
raise
#FILE = fields['FILE']
COMMAND = fields['COMMAND']
VARIABLE = fields['VARIABLE'].replace('%2B', '+')
#pyferret.run('use ' + FILE)
#pyferret.run('show data')
# load the environment (dataset to open + variables definition)
pyferret.run('go ' + envScript)
try:
PATTERN = fields['PATTERN']
except:
PATTERN = None
tmpname = tempfile.NamedTemporaryFile(suffix='.png').name
tmpname = os.path.basename(tmpname)
# ---------------------------------------------------------
if fields['REQUEST'] == 'GetColorBar':
pyferret.run('set window/aspect=1/outline=0')
pyferret.run('go margins 2 4 3 3')
pyferret.run(COMMAND + '/set_up ' + VARIABLE)
pyferret.run('ppl shakey 1, 0, 0.15, , 3, 9, 1, `($vp_width)-1`, 1, 1.25 ; ppl shade')
pyferret.run('frame/format=PNG/transparent/xpixels=400/file="' + tmpdir + '/key' + tmpname + '"')
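                # keep only the horizontal band (y = 325..375) where the
                # colorbar key was drawn in the rendered frame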
im = Image.open(tmpdir + '/key' + tmpname)
box = (0, 325, 400, 375)
area = im.crop(box)
area.save(tmpdir + '/' + tmpname, "PNG")
# ---------------------------------------------------------
elif fields['REQUEST'] == 'GetMap':
WIDTH = int(fields['WIDTH'])
HEIGHT = int(fields['HEIGHT'])
# BBOX=xmin,ymin,xmax,ymax
BBOX = fields['BBOX'].split(',')
HLIM = '/hlim=' + BBOX[0] + ':' + BBOX[2]
VLIM = '/vlim=' + BBOX[1] + ':' + BBOX[3]
# outline=5 is a strange setting but works otherwise get outline around polygons
pyferret.run('set window/aspect=1/outline=5')
pyferret.run('go margins 0 0 0 0')
pyferret.run(COMMAND + '/noaxis/nolab/nokey' +
HLIM + VLIM + ' ' + VARIABLE)
pyferret.run('frame/format=PNG/transparent/xpixels=' +
str(WIDTH) + '/file="' + tmpdir + '/' + tmpname + '"')
if os.path.isfile(tmpdir + '/' + tmpname):
if PATTERN:
img = Image.open(tmpdir + '/' + tmpname)
pattern = Image.open(PATTERN)
img = Image.composite(img, pattern, pattern)
img.save(tmpdir + '/' + tmpname)
# ---------------------------------------------------------
else:
raise
if os.path.isfile(tmpdir + '/' + tmpname):
ftmp = open(tmpdir + '/' + tmpname, 'rb')
img = ftmp.read()
ftmp.close()
os.remove(tmpdir + '/' + tmpname)
start_response('200 OK', [('content-type', 'image/png')])
return [img]
        except Exception:
            start_response('500 Internal Server Error',
                           [('content-type', 'text/plain')])
            return [b'Exception caught']
# ==============================================================
class myArbiter(gunicorn.arbiter.Arbiter):
def halt(self):
# Close pyferret
pyferret.stop()
print('Removing temporary directory: ', tmpdir)
shutil.rmtree(tmpdir)
super(myArbiter, self).halt()
# ==============================================================
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
# Start pyferret
pyferret.start(journal=False, unmapped=True, quiet=True, verify=False)
master_pid = os.getpid()
print('---------> gunicorn master pid: ', master_pid)
if not serverOnly: # nw will be launched
listSynchroMapsToSet = list(
itertools.permutations(range(1, nbMaps+1), 2))
instance_WMS_Client = Template(template_WMS_client())
instance_NW_Package = Template(template_nw_package())
with open(tmpdir + '/index.html', 'w') as f:
f.write(instance_WMS_Client.render(cmdArray=cmdArray, gunicornPID=master_pid,
listSynchroMapsToSet=listSynchroMapsToSet,
mapWidth=mapWidth, mapHeight=mapHeight,
mapCenter=mapCenter, mapZoom=mapZoom, port=port))
with open(tmpdir + '/package.json', 'w') as f:
f.write(instance_NW_Package.render(nbMaps=nbMaps,
mapWidth=mapWidth, mapHeight=mapHeight))
# Launch NW.js
proc = subprocess.Popen(['nw', tmpdir])
print('Client nw process: ', proc.pid)
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in self.options.items()
if key in self.cfg.settings and value is not None])
for key, value in config.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
# if control before exiting is needed
def run(self):
try:
myArbiter(self).run()
except RuntimeError as e:
print('\nError: %s\n' % e, file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
# ==============================================================
def template_WMS_client():
return '''
<!doctype html>
<html>
<head>
<meta charset='utf-8'>
<title>Slippy maps with WMS from pyferret</title>
<script src='http://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min.js'></script>
<link rel='stylesheet' href='http://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.1/themes/base/jquery-ui.min.css' />
<script src='http://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.1/jquery-ui.min.js'></script>
<link rel='stylesheet' href='http://cdnjs.cloudflare.com/ajax/libs/leaflet/1.7.1/leaflet.css' />
<script src='http://cdnjs.cloudflare.com/ajax/libs/leaflet/1.7.1/leaflet.js'></script>
<script src='http://cdn.rawgit.com/jieter/Leaflet.Sync/master/L.Map.Sync.js'></script>
<style type='text/css'>
html, body { font-family: 'arial' }
.mapContainer { display: inline-block ; margin-left: 10px; margin-top: 10px;}
.title { font-size: 12px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; width: {{ mapWidth }}px; }
.map { width: {{ mapWidth }}px; height: {{ mapHeight }}px; }
.key { text-align: center; margin: auto; }
.key img { width: {{ mapWidth }}px; height: auto; max-width: 400px; }
.leaflet-bar a, .leaflet-bar a:hover {
height: 16px;
line-height: 16px;
width: 16px;
}
.leaflet-control-zoom-in, .leaflet-control-zoom-out {
font-size: 14px;
text-indent: 0px;
}
#dialog {
display: none;
font-size: 12px;
}
#commandLine {
width: 100%;
font-size: 12px;
}
.ui-dialog { z-index: 1000 !important; }
.ui-dialog-title { font-size: 12px !important; }
.ui-icon-gripsmall-diagonal-se { z-index: 1000 !important; }
</style>
</head>
<body>
<div id="dialog">
<input id="commandLine" type="text" placeholder="New command">
</div>
{% for aDict in cmdArray -%}
<div class='mapContainer'>
<div id='title{{ loop.index }}' class='title'></div>
<div id='map{{ loop.index }}' class='map'></div>
<div id='key{{ loop.index }}' class='key'><img /></div>
</div>
{% endfor -%}
<script type='text/javascript'>
//===============================================
var crs = L.CRS.EPSG4326;
var map = [];
var wmspyferret = [];
var frontiers= [];
{% for aDict in cmdArray -%}
//===============================================
wmspyferret[{{ loop.index }}] = L.tileLayer.wms('http://localhost:{{ port }}', {
command: '{{ aDict.command }}',
variable: '{{ aDict.variable }}',
crs: crs,
format: 'image/png',
transparent: true,
uppercase: true
});
frontiers[{{ loop.index }}] = L.tileLayer.wms('https://www.globalcarbonatlas.org:8443/geoserver/GCA/wms', {
layers: 'GCA:GCA_frontiersCountryAndRegions',
format: 'image/png',
crs: crs,
transparent: true
});
map[{{ loop.index }}] = L.map('map{{ loop.index }}', {
layers: [wmspyferret[{{ loop.index }}], frontiers[{{ loop.index }}]],
crs: crs,
center: {{ mapCenter }},
zoom: {{ mapZoom }},
attributionControl: false
});
{% endfor %}
//===============================================
// Set up synchro between maps
{% for synchro in listSynchroMapsToSet -%}
map[{{ synchro[0] }}].sync(map[{{ synchro[1] }}]);
{% endfor %}
//===============================================
function getTitle(aCommand, aVariable) {
// Inspect command to get /title qualifier if present
m = aCommand.match(/title=([\w&]+)/); // equivalent to search in python
if (m != null)
title = m[1]
else
title = aVariable
return title
}
//===============================================
{% for aDict in cmdArray -%}
title{{ loop.index }} = getTitle(wmspyferret[{{ loop.index }}].wmsParams.command, wmspyferret[{{ loop.index }}].wmsParams.variable.replace('%2B','+'));
$('#title{{ loop.index }}').html(title{{ loop.index }});
$('#title{{ loop.index }}').attr('title', wmspyferret[{{ loop.index }}].wmsParams.command + ' ' + wmspyferret[{{ loop.index }}].wmsParams.variable.replace('%2B','+'));
$('#key{{ loop.index }}').children('img').attr('src', 'http://localhost:{{ port }}/?SERVICE=WMS&REQUEST=GetColorBar' +
'&COMMAND=' + wmspyferret[{{ loop.index }}].wmsParams.command +
'&VARIABLE=' + wmspyferret[{{ loop.index }}].wmsParams.variable.replace('+','%2B'));
$('#map{{ loop.index }}').resizable();
{% endfor %}
//===============================================
$(".title").on('click', function() {
id = $(this).attr('id');
mapId = id.replace('title','');
$('#commandLine').val($('#'+id).attr('title'));
$('#commandLine').attr('mapId', mapId);
$('#dialog').dialog({ title: 'Command of map #'+mapId, modal: false, width: 600, height: 100,
position: {my: "left", at: "left+10", of: window} });
});
//===============================================
$('#commandLine').on('keypress', function(e) {
if(e.which === 13) {
commandLine = $(this).val().split(' ');
command = commandLine[0];
commandLine.shift();
variable = commandLine.join(' ');
mapId = $(this).attr('mapId');
wmspyferret[mapId].setParams({ command: command, variable: variable.replace('+','%2B') });
title = getTitle(command, variable);
$('#title'+mapId).html(title);
$('#title'+mapId).attr('title', command + ' ' + variable);
$('#key'+mapId).children('img').attr('src', 'http://localhost:{{ port }}/?SERVICE=WMS&REQUEST=GetColorBar' +
'&COMMAND=' + command +
'&VARIABLE=' + variable.replace('+','%2B'));
}
});
//===============================================
$('.map').on('resize', function() {
width = $(this).width();
height = $(this).height();
{% for aDict in cmdArray -%}
$('#map{{ loop.index }}').width(width);
$('#map{{ loop.index }}').height(height);
{% endfor %}
});
//===============================================
var exec = require('child_process').exec,child;
process.stdout.write('Starting NW application\\n');
process.on('exit', function (){
process.stdout.write('Exiting from NW application, now killing the gunicorn server\\n');
process.kill({{ gunicornPID }}); // kill gunicorn server
});
</script>
</body>
</html>
'''
# ==============================================================
def template_nw_package():
return '''
{
"name": "Slippy maps with WMS from pyferret",
"main": "index.html",
"window": {
"toolbar": false,
"width": {{ nbMaps*mapWidth + nbMaps*10 + 60 }},
"height": {{ mapHeight + 100 }}
}
}
'''
# ==============================================================
# ------------------------------------------------------
usage = "%prog [--width=400] [--height=400] [--size=value] [--center=[0,0]] [--zoom=1]" + \
"\n [--env=pyferretWMS.jnl] [--server] [--port=8000]" + \
"\n 'cmd/qualifiers variable; cmd/qualifiers variable'" + \
"\n\n'cmd/qualifiers variable' is a classic ferret call (no space allowed except to" + \
"\nseparate the variable from the command and its qualifiers). The semi-colon character ';'" +\
"\nis the separator between commands and will determine the number of maps to be drawn." + \
"\nThe qualifiers can include the title qualifier considering that the space character" + \
"\nis not allowed since used to distinguish the cmd/qualifiers and the variable(s)." + \
"\nFor this, you can use the HTML code ' ' for the non-breaking space (without the ending semi-colon)." + \
"\nFor example: 'shade/lev=20/title=Simulation A varA; shade/lev=20/title=Simulation B varB'"
version = "%prog 0.9.7"
# ------------------------------------------------------
parser = OptionParser(usage=usage, version=version)
parser.add_option("--width", type="int", dest="width", default=400,
help="200 < map width <= 600")
parser.add_option("--height", type="int", dest="height", default=400,
help="200 < map height <= 600")
parser.add_option("--size", type="int", dest="size",
help="200 < map height and width <= 600")
parser.add_option("--env", dest="envScript", default="pyferretWMS.jnl",
help="ferret script to set the environment (default=pyferretWMS.jnl). It contains datasets to open, variables definition.")
parser.add_option("--center", type="string", dest="center", default='[0,-40]',
help="Initial center of maps as [lat, lon] (default=[0,-40])")
parser.add_option("--zoom", type="int", dest="zoom", default=1,
help="Initial zoom of maps (default=1)")
parser.add_option("--server", dest="serverOnly", action="store_true", default=False,
help="Server only (default=False)")
parser.add_option("--port", type="int", dest="port", default=8000,
help="Server port number (default=8000)")
(options, args) = parser.parse_args()
if options.size:
mapHeight = options.size
mapWidth = options.size
else:
mapHeight = options.height
mapWidth = options.width
mapCenter = options.center
mapZoom = options.zoom
envScript = options.envScript
serverOnly = options.serverOnly
port = options.port
# ------------------------------------------------------
# Global variables
nbMaps = 0
cmdArray = []
tmpdir = tempfile.mkdtemp()
print('Temporary directory to remove: ', tmpdir)
# ------------------------------------------------------
if serverOnly:
if len(args) != 0:
parser.error("No argument needed in mode server")
parser.print_help()
else:
if len(args) != 1:
parser.error("Wrong number of arguments")
parser.print_help()
if (mapWidth < 200 or mapWidth > 600) or (mapHeight < 200 or mapHeight > 600):
parser.error(
"Map size options incorrect (200 <= size,width,height <= 600)")
parser.print_help()
sys.exit(1)
if not os.path.isfile(envScript):
parser.error("Environment script option missing")
parser.print_help()
sys.exit(1)
cmdsRequested = args[0]
cmds = cmdsRequested.split(';') # get individual commands
cmds = list(map(str.strip, cmds)) # remove surrounding spaces if present
nbMaps = len(cmds)
print(str(nbMaps) + ' maps to draw')
if nbMaps > 4:
print("\n=======> Error: Maximum number of maps: 4\n")
parser.print_help()
sys.exit(1)
# create array of dict {'command', 'variable'}
for i, cmd in enumerate(cmds, start=1):
# Get command
command = cmd.split(' ')[0]
# Get variable
variable = ' '.join(cmd.split(' ')[1:])
cmdArray.append({'command': command, 'variable': variable})
# ------------------------------------------------------
options = {
'bind': '%s:%s' % ('127.0.0.1', port),
'workers': number_of_workers(),
'worker_class': 'sync',
'threads': 1
}
StandaloneApplication(handler_app, options).run()
sys.exit(1)
|
PBrockmann/wms-pyferret
|
pyferretWMS.py
|
Python
|
mit
| 17,176
|
from setuptools import setup
setup(
name='stack_dumpper',
version='0.0.2',
author='Jayson Reis',
author_email='santosdosreis@gmail.com',
    description='Dump stack trace from all threads (including main thread) when CTRL + \ is pressed or SIGQUIT is received.',
url='https://github.com/jaysonsantos/python-stack-dumpper',
packages=['stack_dumpper'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
]
)
|
jaysonsantos/python-stack-dumpper
|
setup.py
|
Python
|
mit
| 589
|
from .polo import optimal_leaf_ordering
|
adrianveres/polo
|
polo/__init__.py
|
Python
|
mit
| 39
|
"""django_channels URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
sourcelair-blueprints/django-channels-chat
|
django_channels/urls.py
|
Python
|
mit
| 771
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import RegularPolyCollection
from BF_functions import user_rc
'''
Radial velocity plotter!
Makes a plot with two panels: top is RV vs. time, bottom is RV vs. orbital phase
You need to have these columns in your input file: TIME, PHASE, RV1, RV1_ERR, RV2, RV2_ERR
Update September 2015:
Has two flag options
1. apply some shift to the RVs before plotting them (doShift)
2. read in another set of calculated RVs from cols 8,9,10,11 and plot RV1 = RV_col8-RV_col3
and RV2 = RV_col10-RV_col5, plus a line at RV = 0 (compareRVs)
Update June 2016:
Simplified some options; no longer manually sets point shape as a function of "source" string.
If you want that functionality, use an older version of this code... it was messy.
**NOTE that any RV value with an error bar = 0 is not plotted!**
'''
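# Illustrative whitespace-separated input row (hypothetical values). Columns are
# indexed below as TIME, PHASE, (unused), RV1, RV1_ERR, RV2, RV2_ERR, RV3, RV3_ERR:
# 2455002.345  0.432  0  12.34  0.56  -45.67  0.89  3.21  0.12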
dateoffset = 2454833. # this value will be subtracted from BJDs in the RV vs. time panel
#sysname = '5285607'; filename = '5285607Outfile_take2.txt'
#timestart = 980; timeend = 1020
#phasemin = 0.5; phasemax = 1.5
#RVmin = -45; RVmax = 180
#sysname = '6449358'; filename = 'data/6449358/6449358Outfile.txt'
#timestart = 1700; timeend = 2000
#phasemin = 0.5; phasemax = 1.5
#RVmin = 0; RVmax = 140
colors = user_rc()
sysname = '3247294'; filename = 'data/3247294/3247294outfile.txt'
timestart = 970; timeend = 1040
phasemin = -0.1; phasemax = 1.0
RVmin = 0; RVmax = 150
#sysname = '5285607'; filename = 'data/5285607/5285607OutfileJC.txt'
#timestart = 975; timeend = 1040
#phasemin = 0.5; phasemax = 1.5
#RVmin = -50; RVmax = 200 # 5285607
#sysname = '6131659'; filename = 'data/6131659/6131659outfileALL.txt'
#timestart = 1520; timeend = 2000
#phasemin = -0.1; phasemax = 1.0
#RVmin = 0; RVmax = 200
#sysname = '6781535'; filename = 'data/6781535/6781535Outfile.txt'#
#timestart = 1700; timeend = 2000
#phasemin = -0.1; phasemax = 1.0
#RVmin = -100; RVmax = 200
#sysname = '4285087'; filename = 'data/4285087/4285087Outfile.txt'
#timestart = 1700; timeend = 2000
#phasemin = -0.1; phasemax = 1.0
#RVmin = 0; RVmax = 200
#sysname = '6778289'; filename = 'data/6778289/6778289OutfileJC.txt'
#timestart = 1720; timeend = 1990
#phasemin = 0.5; phasemax = 1.12
#RVmin = -50; RVmax = 150 # 6778289
#sysname = '6864859'; filename = 'data/6864859/6864859OutfileJC.txt'
#timestart = 1720; timeend = 1990
#phasemin = 0.53; phasemax = 1.53
#RVmin = 40; RVmax = 140 # 6864859
# Other useful definitions
red = '#e34a33' # red, star 1
yel = '#fdbb84' # yellow, star 2
# usecols=(0,1,3,4,5,6) # this is the default, with RVs in 3,4,5,6 not 8,9,10,11
rvdata = np.loadtxt(filename, comments='#', unpack=True)
bjd = rvdata[0]
phase = rvdata[1]
rv1 = rvdata[3]; rverr1 = rvdata[4]
rv2 = rvdata[5]; rverr2 = rvdata[6]
rv3 = rvdata[7]; rverr3 = rvdata[8]
# Skip any RV values that have 0 for error bars or are already None
for idx, err in enumerate(rverr1):
if err == 0:
rv1[idx] = None
rverr1[idx] = None
for idx, err in enumerate(rverr2):
if err == 0:
rv2[idx] = None
rverr2[idx] = None
rv1mask = np.isfinite(rv1)
rv2mask = np.isfinite(rv2)
rv3mask = np.isfinite(rv3)
# Double the arrays so we can plot any phase from 0 to phase 2... assuming phase is in range (0,1)
rv1_double = np.concatenate((rv1,rv1), axis=0)
rv2_double = np.concatenate((rv2,rv2), axis=0)
rv3_double = np.concatenate((rv3,rv3), axis=0)
phase_double = np.concatenate((np.array(phase),np.array(phase)+1.0), axis=0)
rverr1_double = np.concatenate((rverr1,rverr1), axis=0)
rverr2_double = np.concatenate((rverr2,rverr2), axis=0)
rverr3_double = np.concatenate((rverr3, rverr3), axis=0)
# Set up the figure
fig = plt.figure(1, figsize=(13,9))
# Unfolded RV vs time (BJD-2454833)
ax2 = plt.subplot(2,1,1)
plt.axis([timestart, timeend, RVmin, RVmax])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
plt.tick_params(axis='both', which='major')
# dotted lines to guide the eye
plt.plot(bjd[rv1mask]-dateoffset, rv1[rv1mask], color=colors[15], mfc=None, mec=None, lw=1.5, ls=':')
plt.plot(bjd[rv2mask]-dateoffset, rv2[rv2mask], color=colors[15], mfc=None, mec=None, lw=1.5, ls=':')
plt.plot(bjd[rv3mask]-dateoffset, rv3[rv3mask], color=colors[15], mfc=None, mec=None, lw=1.5, ls=':')
for idx, date in enumerate(bjd):
plt.errorbar(date-dateoffset, rv1[idx], yerr=rverr1[idx], fmt='ko', color=colors[15], mfc=colors[6], mec=colors[14], ms=10, lw=1.5)
plt.errorbar(date-dateoffset, rv2[idx], yerr=rverr2[idx], fmt='ko', color=colors[15], mfc=colors[2], mec=colors[14], ms=10, lw=1.5)
plt.errorbar(date-dateoffset, rv3[idx], yerr=rverr3[idx], fmt='ko', color=colors[15], mfc=colors[8], mec=colors[14], ms=10, lw=1.5)
plt.xlabel("Time (BJD -- {0:.0f})".format(dateoffset))
# Folded RV vs phase
ax1 = plt.subplot(2,1,2)
plt.axis([phasemin, phasemax, RVmin, RVmax])
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
plt.tick_params(axis='both', which='major')
for idx, ph in enumerate(phase_double):
plt.errorbar(phase_double[idx], rv1_double[idx], yerr=rverr1_double[idx], marker='o', color=colors[6], mec=colors[14], ecolor=colors[6], ms=10, ls='None', lw=1.5)
plt.errorbar(phase_double[idx], rv2_double[idx], yerr=rverr2_double[idx], marker='o', color=colors[2], mec=colors[14], ecolor=colors[2], ms=10, ls='None', lw=1.5)
plt.errorbar(phase_double[idx], rv3_double[idx], yerr=rverr3_double[idx], marker='o', color=colors[8], mec=colors[14], ecolor=colors[8], ms=10, ls='None', lw=1.5)
plt.xlabel("Orbital Phase")
# Draw vertical lines at phase = 0.5
#plt.axvline(x=0.5, ymin=-59, ymax=45, color='k', ls=':')
#plt.axvline(x=1.5, ymin=-59, ymax=45, color='k', ls=':')
# Option for a legend and labels (note: for a legend you will need to add a label to the plt.errorbar commands)
#plt.legend(ncol=2, loc=1, numpoints=1, frameon=False, bbox_to_anchor=(1,2.35), columnspacing=0.7)
fig.text(0.07, 0.5, 'Radial Velocity (km s$^{-1}$)', ha='center', va='center', rotation='vertical', size='large')
fig.text(0.14, 0.13, 'Folded')
fig.text(0.14, 0.55, 'Unfolded')
fig.text(0.14, 0.9, sysname, size='large')
plt.show()
fig.savefig('3247294rv.png')
|
savvytruffle/cauldron
|
rvs/rvplotmaker.py
|
Python
|
mit
| 6,377
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import logging
import httplib
from socket import timeout
from threading import Thread
from time import sleep
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson, urlencode_noplus
json = import_simplejson()
STREAM_VERSION = '1.1'
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
else:
logging.error("Unknown message type: " + str(raw_data))
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_limit(self, track):
"""Called when a limitation notice arrvies"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
self.buffer_size = options.get("buffer_size", 1500)
if options.get("secure", True):
self.scheme = "https"
else:
self.scheme = "http"
self.api = API()
self.headers = options.get("headers") or {}
self.parameters = None
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
def _run(self):
# Authenticate
url = "%s://%s%s" % (self.scheme, self.host, self.url)
# Connect and process the stream
error_counter = 0
conn = None
exception = None
while self.running:
if self.retry_count is not None and error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
if self.scheme == "http":
conn = httplib.HTTPConnection(self.host, timeout=self.timeout)
else:
conn = httplib.HTTPSConnection(self.host, timeout=self.timeout)
self.auth.apply_auth(url, 'POST', self.headers, self.parameters)
conn.connect()
conn.request('POST', self.url, self.body, headers=self.headers)
resp = conn.getresponse()
if resp.status != 200:
if self.listener.on_error(resp.status) is False:
break
error_counter += 1
if resp.status == 420:
self.retry_time = max(self.retry_420_start, self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2, self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (timeout, ssl.SSLError) as exc:
                # If it's not a timeout, treat it like any other exception
if isinstance(exc, ssl.SSLError) and not (exc.args and 'timed out' in str(exc.args[0])):
exception = exc
break
if self.listener.on_timeout() == False:
break
if self.running is False:
break
conn.close()
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exception:
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if conn:
conn.close()
if exception:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exception)
raise
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
while self.running and not resp.isclosed():
# Note: keep-alive newlines might be inserted before each length value.
# read until we get a digit...
c = '\n'
while c == '\n' and self.running and not resp.isclosed():
c = resp.read(1)
delimited_string = c
# read rest of delimiter length..
d = ''
while d != '\n' and self.running and not resp.isclosed():
d = resp.read(1)
delimited_string += d
# read the next twitter status object
if delimited_string.strip().isdigit():
next_status_obj = resp.read( int(delimited_string) )
if self.running:
self._data(next_status_obj)
if resp.isclosed():
self.on_closed(resp)
def _start(self, async):
self.running = True
if async:
Thread(target=self._run).start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self, stall_warnings=False, _with=None, replies=None,
track=None, locations=None, async=False, encoding='utf8'):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json?delimited=length' % STREAM_VERSION
self.host='userstream.twitter.com'
if stall_warnings:
self.parameters['stall_warnings'] = stall_warnings
if _with:
self.parameters['with'] = _with
if replies:
self.parameters['replies'] = replies
if locations and len(locations) > 0:
assert len(locations) % 4 == 0
self.parameters['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
encoded_track = [s.encode(encoding) for s in track]
self.parameters['track'] = ','.join(encoded_track)
self.body = urlencode_noplus(self.parameters)
self._start(async)
def firehose(self, count=None, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json?delimited=length' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def retweet(self, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json?delimited=length' % STREAM_VERSION
self._start(async)
def sample(self, count=None, async=False):
self.parameters = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json?delimited=length' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def filter(self, follow=None, track=None, async=False, locations=None,
count=None, stall_warnings=False, languages=None, encoding='utf8'):
self.parameters = {}
self.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json?delimited=length' % STREAM_VERSION
if follow:
encoded_follow = [s.encode(encoding) for s in follow]
self.parameters['follow'] = ','.join(encoded_follow)
if track:
encoded_track = [s.encode(encoding) for s in track]
self.parameters['track'] = ','.join(encoded_track)
if locations and len(locations) > 0:
assert len(locations) % 4 == 0
self.parameters['locations'] = ','.join(['%.4f' % l for l in locations])
if count:
self.parameters['count'] = count
if stall_warnings:
self.parameters['stall_warnings'] = stall_warnings
if languages:
self.parameters['language'] = ','.join(map(str, languages))
self.body = urlencode_noplus(self.parameters)
self.parameters['delimited'] = 'length'
self._start(async)
def disconnect(self):
if self.running is False:
return
self.running = False
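# Illustrative usage sketch (assumes OAuth credentials are available; the
# listener is a StreamListener subclass overriding on_status or on_data):
#
#   listener = StreamListener()
#   stream = Stream(auth, listener)        # auth: a tweepy auth handler
#   stream.filter(track=['python'])        # blocks; pass async=True to run threaded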
|
memaker/tweepy
|
tweepy/streaming.py
|
Python
|
mit
| 11,639
|
from django.contrib.auth.models import User
from rest_framework.generics import RetrieveAPIView
from snippets.serializers.UserSerializer import UserSerializer
class UserDetail(RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
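# A matching URLconf entry might look like (hypothetical pattern; RetrieveAPIView
# looks instances up by pk by default):
#   url(r'^users/(?P<pk>[0-9]+)/$', UserDetail.as_view())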
|
sinner/testing-djrf
|
tutorial/snippets/views/UserDetail.py
|
Python
|
mit
| 268
|
import collections
# def maxSum(arr):
# n = len(arr)
# q = collections.deque(arr)
# num_list = []
# for _ in xrange(n):
# tmp = q.popleft()
# num_list.append(list(q))
# q.append(tmp)
# DP = [[0] * (n-1) for _ in xrange(n)]
# for i in range(n):
# DP[i][0] = num_list[i][0]
# DP[i][1] = num_list[i][1]
# for k in range(2, n-1):
# DP[i][k] = max(DP[i][k-1], DP[i][k-2]+num_list[i][k])
# print DP
# print num_list
# return max(DP[c][-2] for c in range(n))
def maxSum(arr):
n = len(arr)
q = collections.deque(arr)
q_list = []
for _ in range(n):
tmp = q.popleft()
q.append(tmp)
q_list.append(list(q))
max_ = 0
for q in q_list:
que = collections.deque(q)
curMax = 0
while que:
curMax += que.popleft()
que.popleft()
que.pop()
max_ = max(curMax, max_)
return max_
print maxSum([1,10,1,8,1,2])
print maxSum([1,99,98, 99, 100, 101])
|
quake0day/oj
|
new_dp_3ncircle.py
|
Python
|
mit
| 1,045
|
"""
Django settings for chatdemo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
VAR_DIR = os.path.join(BASE_DIR, 'var')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#+)n3ox9id*+(rz-h6#&&fry7j3m^9sn(ux_!$9$k*@(zrv=%x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sepiida_chat',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'chatdemo.urls'
WSGI_APPLICATION = 'chatdemo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(VAR_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(VAR_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'templates'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(VAR_DIR, 'media')
|
Thinker-ru/sepiida-chat
|
chatdemo/chatdemo/settings.py
|
Python
|
mit
| 2,272
|
import logging
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
    To get the singleton instance, use the `instance` method. Trying
    to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
        logging.error('Singletons must be accessed through `instance()`.')
        raise TypeError('Singletons must be accessed through `instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
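# Illustrative usage sketch (hypothetical `Config` class):
#
#   @Singleton
#   class Config:
#       def __init__(self):
#           self.loaded = True
#
#   cfg = Config.instance()    # lazily created, same object on every call
#   Config()                   # raises TypeError, per __call__ above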
|
eusyar/4fun
|
util/DesignPatterns.py
|
Python
|
mit
| 1,289
|
import pytest
@pytest.fixture
def thyrus():
from api.scrapers.item import scrape_item
return scrape_item('d19447e548d')
def test_scrape_item_by_id(thyrus):
assert thyrus.id == 'd19447e548d'
assert thyrus.name == 'Thyrus Zenith'
assert thyrus.type == 'Two-handed Conjurer\'s Arm'
assert thyrus.ilvl == 90
assert thyrus.mind == 31
assert thyrus.spell_speed == 26
def test_scrape_item_adds_to_database(thyrus):
from api.models.item import Item
t = Item.query.filter_by(name='Thyrus Zenith').first()
assert t.id == thyrus.id
def test_item_json(client):
response = client.get('/scrape/item/cada9ec7074')
assert response.status_code == 200
assert response.json == {
'id': 'cada9ec7074',
'ilvl': 110,
'name': 'Arachne Robe',
'type': 'Body',
'stats': {
'accuracy': 8,
'auto_attack': 0,
'block_rate': 0,
'block_strength': 0,
'critical_hit_rate': 0,
'damage': 0,
'defense': 54,
'delay': 0,
'determination': 0,
'magic_defense': 92,
'mind': 39,
'piety': 31,
'spell_speed': 29,
'vitality': 41
}
}
def test_item_invalid_lodestone_id(client):
response = client.get('/scrape/item/23fh032hf0oi1so3a012r1')
assert response.status_code == 403
assert response.json == {
'error': 'Invalid Request',
'message': 'Lodestone ID does not exist'
}
def test_character_illegal_lodestone_id(client):
response = client.get('/scrape/item/123abc!')
assert response.status_code == 403
assert response.json == {
'error': 'Invalid Request',
'message': 'Illegal characters in requested ID'
}
def test_item_repr():
from api.scrapers.item import scrape_item
item = scrape_item('9139260df17')
assert repr(item) == '<Item lodestone_id=9139260df17 name=Aegis Shield Zeta type=Shield ilvl=135>'
|
Demotivated/loadstone
|
api/tests/test_item.py
|
Python
|
mit
| 2,023
|
"""
pyexcel_io.readers.tsv
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The lower level tsv file format handler.
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.constants as constants
from .csvr import CSVBookReader
class TSVBookReader(CSVBookReader):
""" Read tab separated values """
def __init__(self):
CSVBookReader.__init__(self)
self._file_type = constants.FILE_FORMAT_TSV
def open(self, file_name, **keywords):
keywords['dialect'] = constants.KEYWORD_TSV_DIALECT
CSVBookReader.open(self, file_name, **keywords)
def open_stream(self, file_content, **keywords):
keywords['dialect'] = constants.KEYWORD_TSV_DIALECT
CSVBookReader.open_stream(self, file_content, **keywords)
|
caspartse/QQ-Groups-Spider
|
vendor/pyexcel_io/readers/tsv.py
|
Python
|
mit
| 829
|
# Generated by Django 2.2.13 on 2021-04-14 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studygroups', '0144_auto_20210413_1010'),
]
operations = [
migrations.AddField(
model_name='meeting',
name='wrapup_sent_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
p2pu/learning-circles
|
studygroups/migrations/0145_meeting_wrapup_sent_at.py
|
Python
|
mit
| 410
|
import datetime
import os
from .store import *
def create_textset(name, description, source, url, filename):
"""
creates a text set object
    :param name: name of the text set
    :param description: description of the text set
    :param source: source of the text set (url or file id)
    :param url: url of the text set
    :param filename: filename of the data
:return: a textset object
"""
# create object and control data
creation_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
ts = TextSet(0, name, description, source, url, filename, creation_date)
# save and create objects related to the textset
ts.textset_id = str(incr_key_store('textset:counter'))
set_key_store('textset:%s:status' % ts.textset_id, 'created')
rpush_key_store('textset:list', ts.textset_id)
ts.finalize_creation()
ts.save()
return ts
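# Illustrative call (hypothetical values; assumes the key store behind .store
# is reachable and /tmp/reviews.txt exists):
#   ts = create_textset('reviews', 'sample corpus', 'file', '', '/tmp/reviews.txt')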
def get_textset(textset_id):
"""
retrieves a textset object from its id
:param textset_id: id
:return: textset object
"""
d = get_key_store('textset:%s' % textset_id)
ds = TextSet(**d['init_data'])
ds.load(d['load_data'])
ds.status = get_key_store('textset:%s:status' % textset_id)
return ds
def get_textset_status(textset_id):
"""
retrieves the status of a textset from its id
:param textset_id: id
:return: status (string)
"""
return get_key_store('textset:%s:status' % textset_id)
def get_textset_list():
"""
get the list of all textsets
    :return: list of textset objects or empty list on error (e.g. redis or environment not set)
"""
#try:
return [get_textset(textset_id) for textset_id in get_textset_ids()]
#except:
# return []
def get_textset_ids():
"""
get the list of ids all textsets
:return: list of ids
"""
return list_key_store('textset:list')
def update_textset(textset_id, name, description, source, url):
"""
update specific fields of the textset
:param textset_id: id of the textset
:param name: new name of the textset
:param description: new description of the textset
:param source: source of the textset
:param url: url of the textset
:return:
"""
ts = get_textset(textset_id)
ts.name = name
ts.description = description
ts.source = source
ts.url = url
ts.save()
def reset_textset(textset_id):
"""
reset the results
:param textset_id: id
:return:
"""
# removes entries
set_key_store('textset:%s:status' % textset_id, 'created')
def delete_textset(textset_id):
"""
deletes a textset and the results
:param textset_id: id
:return:
"""
# removes entries
del_key_store('textset:%s:status' % textset_id)
lrem_key_store('textset:list', textset_id)
# delete file
os.remove(get_data_folder() + '/texts' + '/' + str(textset_id) + '.txt')
class TextSet(object):
def __init__(self, textset_id, name, description, source, url, filename, creation_date):
self.textset_id = textset_id
self.name = name
self.description = description
self.url = url
self.source = source # url or file id
if filename == '':
raise ValueError('filename cannot be empty')
self.filename = filename
ext = filename.split('.')[-1].lower()
if ext not in ['txt']:
raise TypeError('unknown text format: use txt')
if not os.path.exists(filename):
raise ValueError('file %s not found' % filename)
self.size = 0
self.lines = 0
self.creation_date = creation_date
def finalize_creation(self):
# import text
with open(self.filename, 'r') as f:
txt = f.readlines()
self.size = sum([len(s) for s in txt])
self.lines = len(txt)
folder = get_data_folder() + '/texts'
if not os.path.exists(folder):
os.makedirs(folder)
        with open(folder + '/' + str(self.textset_id) + '.txt', 'w') as f:
            for s in txt:
                # readlines() keeps each line's trailing newline, so strip it
                # before re-adding one to avoid writing blank lines
                f.write(s.rstrip('\n') + '\n')
def save(self):
store = {'init_data': {'textset_id': self.textset_id, 'name': self.name, 'description': self.description,
'source': self.source, 'url': self.url, 'filename': self.filename,
'creation_date': self.creation_date},
'load_data': {'size': self.size, 'lines': self.lines}}
set_key_store('textset:%s' % self.textset_id, store)
def load(self, store):
# reload data from json
for k in store.keys():
setattr(self, k, store[k])
|
pierre-chaville/automlk
|
automlk/textset.py
|
Python
|
mit
| 4,512
|
#!/usr/bin/env python
import re
from util.Cache import IndexedCache, SecondOrderCache
from util.Report import Report
from numpy import mean
class ParseEquations:
def __init__(self):
self.identity = None
self.nullTerm = "Nothing"
self.defaultLimit = (None,None)
self.overRideIdentityLimit = False
self.overRideLimit = False
self.setDefault = True
def parseCoefficient( self, element, direction = "+"):
coefficient = 1.0
regex = re.compile( '([0-9\\.E-]+)' )
m = regex.match( element )
if m:
coefficient = float( m.group( 1 ) )
element = m.string[m.end( 1 ):]
element = element.strip()
#print "%s: %f" %(participant, coefficient)
return ( element, float( coefficient ) )
def parseEquation( self, equation):
listOfParticipants = []
equation.replace( '"', '' )
if equation.find( ' + ' ) != -1:
for element in equation.split( ' + ' ):
p = element.strip()
value = self.parseCoefficient(p)
listOfParticipants.append( value )
else:
element = equation.strip()
if element != '' and ( element.find( self.nullTerm ) == -1 ):
p = element.strip()
value = self.parseCoefficient(p)
listOfParticipants.append( value )
return listOfParticipants
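    # e.g. parseEquation("2 A + 3.5 B") returns [('A', 2.0), ('B', 3.5)];
    # a term with no leading number defaults to coefficient 1.0.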
def parseInequality(self,equation):
regex = re.compile('(.*)([<=>])(.*)')
m = regex.match(equation)
if m:
equation = m.group(1)
relation = m.group(2)
limit = m.group(3)
else:
equation = equation
relation = None
limit = None
return (equation,relation,limit)
def addData(self,model,rowName,eqn):
values = self.parseEquation(eqn)
for (variableName,coeffecent) in values:
model.addData(rowName,variableName,coeffecent)
return model
def addRowLimit(self,model,name,relation,limit):
if relation == "=":
model.addRowLimit(name,(limit,limit))
if relation == "<":
model.addRowLimit(name,(None,limit))
if relation == ">":
model.addRowLimit(name,(limit,None))
if relation == None:
return None
def addInequality(self,model,name,eqn):
(equation,relation,limit) = self.parseInequality(eqn)
self.addData(model,name,equation)
self.addRowLimit(model,name,relation,limit)
return model
def setVariableLimits(self,model,limit):
for name in model.getColumnNames():
model.addColumnLimit(name,limit)
return model
def parseObjective(self,eqn):
result = {}
values = self.parseEquation(eqn)
for (variableName,coeffecent) in values:
result[variableName] = coeffecent
return result
def applyTerms(self,model,rowName,terms,prefix,target):
for (tag,coeffecent) in terms:
if tag == self.identity:
itag = target
else:
itag = prefix % tag
model.addData(rowName,itag,coeffecent)
if (tag == self.identity) & (not self.overRideIdentityLimit):
continue
elif self.overRideLimit or (self.setDefault & (itag not in model.getColumnLimits())):
model.addColumnLimit(itag,self.defaultLimit)
return model
def parseApplication(self,equation):
regex = re.compile('(.*):(.*)([<=>])(.*)')
m = regex.match(equation)
rowTag = None
relation = None
limit = None
if m:
rowTag = m.group(1)
equation = m.group(2)
relation = m.group(3)
limit = m.group(4)
return (rowTag,equation,relation,limit)
regex = re.compile('(.*)([<=>])(.*)')
m = regex.match(equation)
rowTag = None
if m:
equation = m.group(1)
relation = m.group(2)
limit = m.group(3)
return (rowTag,equation,relation,limit)
def applyRelation(self,model,relationName,value,targets=[''],prefixPattern="%s"):
'''
variable pattern
row name => <relationName><target>
variable name => <prefixPattern%tag><target>
'''
(rowTag,equation,relation,limit) = self.parseApplication(value)
terms = self.parseEquation(equation)
for target in targets:
if rowTag == None:
rowName = relationName + target
else:
rowName = rowTag % target
prefix = prefixPattern + target
self.applyTerms(model,rowName,terms,prefix,target)
self.addRowLimit(model,rowName,relation,limit)
return model
class LinearModel(SecondOrderCache):
    '''
    Core math matrix object for linear optimization modeling in reflux
    Uses the Second Order Cache object as its foundation
    Three main parts:
    1)Sparse matrix of coefficients
    2)Row and column limits
    3)Objective vector
    Minimize:
    Z = c*x
    Subject to:
    S*x <=> b
    l < x < u
    Extended Linear Control Model
    With additional components:
    4)Annotations
    5)Targets
    6)ControlMap
    7)NaturalObjective
    8)SyntheticObjective
    '''
def __init__(self):
self.verbose = False
#Data Containers
self.modelName = ''
self.data = SecondOrderCache()
self.rowLimits = {}
self.columnLimits = {}
self.mipColumns = IndexedCache()
self.objective = {}
#Report of model annotation values (dict)
self.annotation = None
#Control
self.targets = None
self.controlMap = None
self.controlClusters = None
self.naturalObjective = None
self.syntheticObjective = None
#Variables
self.scale = 1 #! to be removed
self.defaultLowerLimit = None
        self.defaultUpperLimit = None
        #backing store for setProperty/getProperty
        self.properties = {}
def __str__(self):
return self.data.__str__()
def _addString(self,other):
print "string"
def _addScalar(self,other):
print "scalar"
def _addDict(self,other):
print "dict"
def _addLinearModel(self,other):
print "model"
def __add__(self,other):
if type(other) == type(""):
self._addString(other)
if type(other) == type(0) or type(other) ==type(0.0):
self._addScalar(other)
if type(other) == type({}):
self._addDict(other)
if type(other) == type(self):
self._addLinearModel(other)
return self
def __eq__(self,value):
if type(self) != type(value):
return False
a = self.data.equals(value.data)
b = self.rowLimits == value.rowLimits
c = self.columnLimits == value.columnLimits
d = self.objective == value.objective
e = self.mipColumns == value.mipColumns
result = a and b and c and d and e
return result
def _scaleTuple(self,value):
        '''
        scales a pair of values
        used in scaling of whole matrix
        @type value: (float, float)
        '''
(v1,v2) = value
if v1 != None:
v1 = float(v1*self.scale)
if v2 != None:
v2 = float(v2*self.scale)
return (v1, v2)
def _getDefaultColumnLimits(self):
'''
returns the column limits with defaults for values not given
'''
columnNames = set(self.getColumnNames())
limitNames = set(self.columnLimits.keys())
defaultingNames = columnNames.difference(limitNames)
result = {}
for name in defaultingNames:
if not (self.defaultLowerLimit == None and self.defaultUpperLimit == None):
result[name] = (self.defaultLowerLimit,self.defaultUpperLimit)
return result
def _getDefaultRowLimits(self):
'''
returns the row limits with defaults for values not given
'''
columnNames = set(self.getRowNames())
limitNames = set(self.rowLimits.keys())
defaultingNames = columnNames.difference(limitNames)
result = {}
for name in defaultingNames:
if not (self.defaultLowerLimit == None and self.defaultUpperLimit == None):
result[name] = (self.defaultLowerLimit,self.defaultUpperLimit)
return result
def _annotateString(self,value,annotationMap,regex,nsep=" "):
result = ''
tags = re.findall(regex,value)
for s in tags:
if s in annotationMap.keys():
r = annotationMap[s]
else:
r = s
result += nsep + r
result = result[len(nsep):]
return result
def _annotateMap(self,data,annotationMap,regex):
result = {}
for (key,value) in data.items():
ikey = self._annotateString(key, annotationMap, regex)
result[ikey] = value
return result
def _annotateList(self,data,annotationMap,regex):
result = []
for value in data:
ivalue = self._annotateString(value, annotationMap, regex)
result.append(ivalue)
return result
def annotateGeneList(self,data,annotationName = "bnumber", regex="[a-zA-Z0-9\(\)]+"):
result = data
if self.annotation == None:
return result
if annotationName in self.annotation.keys():
annotationMap = self.annotation[annotationName]
gMap = annotationMap.getColumn("gene")
if annotationMap != None:
result = self._annotateList(data,gMap,regex)
return result
def annotateGenes(self,objective,annotationName = "bnumber", regex="[a-zA-Z0-9\(\)]+"):
result = objective
if self.annotation == None:
return result
if annotationName in self.annotation.keys():
annotationMap = self.annotation[annotationName]
gMap = annotationMap.getColumn("gene")
if annotationMap != None:
result = self._annotateMap(objective,gMap,regex)
return result
def getGeneTargetMap(self):
if self.controlMap == None:
return None
result = {}
for (r,gs) in self.controlMap.items():
for g in gs:
if g not in result.keys():
result[g] = set()
result[g].add(r)
return result
def setProperty(self,name,value):
self.properties[name] = value
def getProperty(self,name):
return self.properties[name]
def setScalex(self,scale):
'''
Sets the scale for the matrix
Not entirely checked for completeness and usage
@param scale: the scaling factor for the limits
@type scale: float
'''
self.scale = scale
def addRowName(self,name):
'''
Adds a row name to matrix
@type name: string
'''
self.data.rowCache.addValue(name)
return None
def addColumnName(self,name):
'''
Adds column name to matrix
@type name: string
'''
self.data.columnCache.addValue(name)
return None
def setMipColumnName(self,name):
'''
Sets column with value name as a integer column
@type name: string
'''
self.mipColumns.addValue(name)
def setMipColumnNames(self,names,tag="%s"):
'''
@type names: string[]
'''
for name in names:
iname = tag % name
self.setMipColumnName(iname)
return None
def getMipColumnNames(self):
'''
Returns array of strings of column names which are set to integers
@rtype: string[]
'''
return self.mipColumns.getValues()
def getRowIndex(self,name):
'''
Returns index of row of value name
@type name: string
@rtype: int
'''
return self.data.rowCache.getindex(name)
def getColumnIndex(self,name):
'''
Returns index of column of value name
@type name: string
@rtype: int
'''
return self.data.columnCache.getindex(name)
def getRowNames(self):
'''
Returns an list of the row names
@rtype: string[]
'''
return self.data.rowCache.getValues()
def getColumnNames(self):
'''
Returns a list of the column names
@rtype: string[]
'''
return self.data.columnCache.getValues()
def addRowLimit(self,rowName,limit):
'''
Sets limits for selected row
@type rowName: string
@type limit (float,float)
'''
self.addRowName(rowName)
self.rowLimits[rowName] = self._scaleTuple(limit)
def addColumnLimit(self,columnName,limit):
'''
Sets limits for selected column
@columnName: string
@type limit (float,float)
'''
self.addColumnName(columnName)
self.columnLimits[columnName] = self._scaleTuple(limit)
def getRowLimit(self,name):
'''
returns limit for row
@rtype: (float,float)
'''
if name in self.rowLimits:
return self.rowLimits[name]
else:
return (None,None)
def getColumnLimit(self,name):
'''
returns limit for column
@rtype: (float,float)
'''
if name in self.columnLimits:
return self.columnLimits[name]
else:
return (None,None)
def addRowLimits(self,limits):
'''
Sets row limits from map
@type limits: {string:(float,float)}
'''
for key in limits.keys():
(lower,upper) = limits[key]
self.addRowLimit(key,(lower,upper))
def addColumnLimits(self,limits):
'''
        Sets column limits from map
@type limits: {string:(float,float)}
'''
for key in limits.keys():
(lower,upper) = limits[key]
self.addColumnLimit(key,(lower,upper))
def getRowLimits(self):
'''
returns map of row limits
@rtype: {name:(float,float)}
'''
result = self.rowLimits.copy()
defaultLimits = self._getDefaultRowLimits()
result.update(defaultLimits)
return result
def getColumnLimits(self):
'''
returns map of column limits
@rtype: {name:(float,float)}
'''
result = self.columnLimits.copy()
defaultLimits = self._getDefaultColumnLimits()
result.update(defaultLimits)
return result
def addObjective(self,columnName, value):
'''
sets objective coeffecent for a column
@type columnName: string
@type value: float
'''
self.objective[columnName] = value
def setObjective(self,objectiveMap):
'''
        Sets objective coefficients from map
        @type objectiveMap: {string:float}
'''
self.objective = {}
for key in objectiveMap.keys():
value = objectiveMap[key]
self.objective[key]=value
def getObjective(self):
'''
returns objective map
@rtype {string,float}
'''
return self.objective
def getRowValueMap(self,rowName):
'''
returns a dict of row values (column name: value)
@type rowName: string
@rtype {string,float}
'''
r = self.data.getRow(rowName)
#return r
result = {}
for key in self.data.keys():
if key[0] == rowName:
colName = key[1]
value = self.data.getValue(key[0],key[1])
result[colName] = value
if r != result:
pass
return result
def getColumnValueMap(self,colName):
'''
        returns a dict of column values (row name: value)
        @type colName: string
@rtype {string,float}
'''
r = self.data.getColumn(colName)
#return r
result = {}
for key in self.data.keys():
if key[1] == colName:
rowName = key[0]
value = self.data.getValue(key[0],key[1])
#self.getValue(key[0],key[1])
result[rowName] = value
if r != result:
pass
return result
def getRowByValue(self,name,function):
result = {}
values = self.getRowValueMap(name)
for (k,v) in values.items():
if function(v):
result[k] = v
return result
def getColumnByValue(self,name,function):
result = {}
values = self.getColumnValueMap(name)
for (k,v) in values.items():
if function(v):
result[k] = v
return result
def getRowValuesFromPred(self,name,values):
result = {}
rvalues = self.getRowValueMap(name)
for (k,v) in rvalues.items():
vi = None
if k in values:
vi = values[k]
result[k] = (v,vi)
return result
def addData(self,rowName,columnName,value):
'''
Add data value to model matrix
@type rowName: string
@type columnName: string
@type value: string
'''
if value == None or value == 0.0:
return None
floatValue = float(value)
self.data.addValue(rowName,columnName,floatValue)
#self.addValue(rowName,columnName,floatValue)
return (0,0)
def addDataCache(self,data):
'''
        Adds a data cache object to the coefficient matrix
@type data: SecondOrderCache {(string,string):float}
'''
self.data.extend(data)
#self.data.rowCache.extend(data.rowCache)
#self.data.columnCache.extend(data.columnCache)
def getData(self,rowName,columnName):
'''
        Get the coefficient of the matrix
@type rowName: string
@type columnName: string
'''
return self.data.getValue(rowName,columnName)
#return self.getValue(rowName,columnName)
def removeData(self,rowName,columnName):
'''
removes a datapoint by row and column name
@type rowName: string
@type columnName: string
'''
self.data.removeValue(rowName,columnName)
def removeRow(self,rowName):
columnNames = self.data.rowMap[rowName]
self.data.removeRow(rowName)
if rowName in self.rowLimits:
del self.rowLimits[rowName]
for columnName in columnNames:
rCount = len(self.data.columnMap[columnName])
if rCount == 0:
self.removeColumn(columnName)
#if self.verbose: print "removing column [%s]" % (columnName)
if columnName in self.columnLimits.keys():
del self.columnLimits[columnName]
return None
def removeColumn(self, columnName):
rowNames = self.data.columnMap[columnName]
self.data.removeColumn(columnName)
if columnName in self.columnLimits:
del self.columnLimits[columnName]
if columnName in self.mipColumns.dataArray:
self.mipColumns.removeValue(columnName)
if self.targets != None:
if columnName in self.targets:
self.targets.removeValue(columnName)
if self.controlMap != None:
if columnName in self.controlMap.keys():
del self.controlMap[columnName]
#print "checking rownames [%s]" % (rowNames)
for rowName in rowNames:
cNames = self.data.rowMap[rowName]
if len(cNames) == 0:
self.removeRow(rowName)
#if self.verbose: print "removing row [%s]" % (rowName)
if rowName in self.rowLimits.keys():
del self.rowLimits[rowName]
return None
def addRow(self,rowName,data):
'''
        add a row in the form of a dictionary
@type rowName: string
@type data: {string,float}
'''
for key in data.keys():
value = data[key]
self.addData(rowName,key,value)
def addColumn(self,columnName,modelVector):
'''
@type columnName: string
@type data: {string:float}
'''
for key in modelVector.keys():
value = modelVector[key]
self.addData(key,columnName,value)
def getSparseMatrix(self):
'''
@rtype (int,int,float)[]
'''
return self.data.getSparseMatrix()
def getSparseMatrixMap(self):
'''
return the index matrix
'''
return self.data.getIndexMatrix()
def addConstraints(self,model):
'''
Add a linear constraints to the current model
@type model: LinearModel
'''
self.data.extend(model.data)
rowLimits = model.getRowLimits()
columnLimits = model.getColumnLimits()
self.addRowLimits(rowLimits)
self.addColumnLimits(columnLimits)
self.mipColumns.extend(model.mipColumns)
return None
def addModel(self,model):
'''
Add a linear model to the current model
@type model: LinearModel
'''
self.data.extend(model.data)
rowLimits = model.getRowLimits()
columnLimits = model.getColumnLimits()
self.addRowLimits(rowLimits)
self.addColumnLimits(columnLimits)
self.mipColumns.extend(model.mipColumns)
return None
def extend(self,model):
'''
        Add a linear model and update the objective function
@type model: LinearModel
'''
self.addModel(model)
self.objective = model.objective
    def multiply(self,modelMatrix,value):
        '''
        Multiply the values of the model by a scalar
        @type value: float
        @rtype: LinearModel
        '''
        result = LinearModel()
        #row and column caches live on the data object, not the model
        result.data.rowCache.extend(modelMatrix.data.rowCache)
        result.data.columnCache.extend(modelMatrix.data.columnCache)
        result.data.extend(modelMatrix.data.multiply(value))
        rowLimits = modelMatrix.getRowLimits()
        columnLimits = modelMatrix.getColumnLimits()
        result.addRowLimits(rowLimits)
        result.addColumnLimits(columnLimits)
        return result
    def transpose(self):
        '''
        Get the transpose of the current model
        @rtype: LinearModel
        '''
        result = LinearModel()
        result.scale = self.scale
        #row and column caches belong to self.data; the transposed
        #cache returned by getTranspose() carries its own
        result.data = self.data.getTranspose()
        return result
def getIndexMatrix(self):
'''
Returns the index matrix of the model
@rtype: (int,int,float)[]
'''
return self.data.getIndexMatrix()
def _reportRow(self,dataValues,limit,predValues=None):
result = ""
for (k,v) in dataValues.items():
p = None
if predValues != None:
if k in predValues:
p = predValues[k]
r = "%s(%s)[%s] + " %(v,p,k)
result += r
result = result[:-2]
result += " = (%s,%s)" % (limit[0],limit[1])
return result
def _vectorValue(self,v1,v2):
result = 0
for k in set(v1.keys()).intersection(v2.keys()):
result += v1[k] * v2[k]
return result
def floatLimit(self,limit):
if limit[0] != None:
r1 = limit[0]
else:
r1 = float("-inf")
if limit[1] != None:
r2 = limit[1]
else:
r2 = float("inf")
result = (r1,r2)
return result
def modelReport(self,dir=1,prediction=None):
report = Report()
delta = 1e-6
for rowName in self.getRowNames():
rLimit = self.floatLimit(self.getRowLimit(rowName))
rValues =self.getRowValueMap(rowName)
rString = self._reportRow(rValues,rLimit,prediction)
report.addElement(rowName,"Type","row")
report.addElement(rowName,"Equation",rString)
rLimitS = "(%s,%s)" % (rLimit[0],rLimit[1])
report.addElement(rowName,"Limit",rLimitS)
if prediction != None:
rValue = self._vectorValue(rValues, prediction)
rValue = round(rValue,6)
rValid = rLimit[0]-delta < rValue < rLimit[1] + delta
report.addElement(rowName,"Value",rValue)
report.addElement(rowName,"Valid",rValid)
for colName in self.getColumnNames():
cLimit = self.floatLimit(self.getColumnLimit(colName))
cValues =self.getColumnValueMap(colName)
cString = self._reportRow(cValues,cLimit,prediction)
report.addElement(colName,"Type","column")
report.addElement(colName,"Equation",cString)
if prediction != None:
if colName in prediction.keys():
cValue = prediction[colName]
cValid = cLimit[0]-delta < cValue < cLimit[1] + delta
report.addElement(colName,"Value",cValue)
report.addElement(colName,"Valid",cValid)
return report
def _convertToGeneNames(self,reactionName):
if reactionName not in self.controlMap.keys():
return None
geneNames = self.controlMap[reactionName]
if type(geneNames) == type(""):
return set([geneNames])
else:
return geneNames
def _convertToGeneTag(self,reactionName,geneClusters = None):
geneNames = self._convertToGeneNames(reactionName)
if geneNames == None:
return reactionName
iGeneNames = set(geneNames)
for geneName in geneNames:
if geneClusters != None:
if geneName in geneClusters.keys():
for iGeneName in geneClusters[geneName]:
iGeneNames.add(iGeneName)
geneTag = ''
for iGeneName in iGeneNames:
geneTag = geneTag + " " + iGeneName
geneTag = geneTag[1:]
return geneTag
def getEnzymeControlMap(self):
result = {}
for (key,values) in self.controlMap.items():
for v in values:
if v not in result.keys():
result[v] = set()
result[v].add(key)
return result
def getControlsForNames(self,variableNames):
result = set()
for name in variableNames:
geneNames = self._convertToGeneNames(name)
if geneNames != None:
for gName in geneNames:
result.add(gName)
return result
def printGeneObjective(self,iObjective,geneClusters=None):
iGeneObjective = {}
iOtherObjective = {}
igControl = {}
for rxnName in iObjective.keys():
rControl = iObjective[rxnName]
if rxnName not in self.controlMap.keys():
iOtherObjective[rxnName] = rControl
else:
geneTag = self._convertToGeneTag(rxnName, geneClusters)
if geneTag not in iGeneObjective:
igControl[geneTag] = []
igControl[geneTag].append(rControl)
for k in igControl.keys():
iGeneObjective[k] = mean(igControl[k])
return (iGeneObjective,iOtherObjective)
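# Illustrative construction sketch (hypothetical names), using the public
# methods above and the Z = c*x, S*x <=> b, l < x < u layout from the docstring:
#
#   m = LinearModel()
#   m.addData('massBalance_A', 'v1', 1.0)
#   m.addData('massBalance_A', 'v2', -1.0)
#   m.addRowLimit('massBalance_A', (0.0, 0.0))   # S*x = b
#   m.addColumnLimit('v1', (0.0, 10.0))          # l < x < u
#   m.addObjective('v1', 1.0)                    # Z = c*x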
|
bionomicron/Redirector
|
core/model/LinearModel.py
|
Python
|
mit
| 28,564
|
#!/usr/bin/python
__author__ = 'vilelag'
import argparse
import numpy as np
def create_parsers():
#parser for the main program
    parser = argparse.ArgumentParser(description='Calculate distance between representations, given 2 dictionaries '
                                                 'and a file with class data. For an example check '
                                                 './Tests/start_config/*.sh')
parser.add_argument('-sn', metavar='<file>', required=True,
help='Dictionary with word representations before w2v training')
parser.add_argument('-en', metavar='<file>', required=True,
help='Dictionary with word representations after w2v training')
parser.add_argument('-c', '-class', metavar='<file>', required=True,
help='Class data generated by create_class.py')
return parser
def read_class_file(fin):
with open(fin, 'r') as f:
content = f.read().splitlines()
words = []
for line in content:
tmp = line.split(' ')
words.append(tmp[0].upper())
return words
def read_word_representation(path):
with open(path) as f:
content = f.read().splitlines()
data = dict()
words, size = content[0].split(' ')
words = int(words)
size = int(size)
for i in range(1, words+1):
temp = content[i].split(' ')
data[temp[0].upper()] = np.asarray([np.float64(x) for x in temp[1:-1]], dtype=np.float64)
# Normalizing
data[temp[0].upper()] *= 1 / np.linalg.norm(data[temp[0].upper()])
return data
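# The layout parsed above matches word2vec's text output: a header line
# "<vocab_size> <vector_size>", then one word per line followed by its vector
# components. The slice temp[1:-1] drops the final split token, which assumes
# each line carries a trailing separator (as word2vec's text dump typically does).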
def main():
parser = create_parsers()
args = vars(parser.parse_args())
start_net = args['sn']
finish_net = args['en']
_class = args['c']
words = read_class_file(_class)
print('Reading start representations')
start = read_word_representation(start_net)
print('Reading finish representations')
finish = read_word_representation(finish_net)
# distance = []
mean = 0
_size = len(words)
for w in words:
try:
# tmp = np.linalg.norm(finish[w]-start[w])
# tmp = np.dot(finish[w]/np.linalg.norm(finish[w]), start[w]/np.linalg.norm(start[w]))
tmp = np.dot(finish[w], start[w])
# distance.append(tmp)
            print '{}: {}'.format(w, tmp)
mean += tmp
except KeyError:
_size -= 1
mean /= _size
print 'mean: {}'.format(mean)
if __name__ == '__main__':
    main()
|
gustavo-momente/word2vec_variations
|
rep_dist.py
|
Python
|
mit
| 2,489
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Bitcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "sdfcoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"sdfcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
waynerjohn/johnwayner
|
share/qt/clean_mac_info_plist.py
|
Python
|
mit
| 894
|
import argparse
settings = argparse.Namespace()
#default settings.
settings.metachar = '!'
settings.chat_prefix = '#'
settings.chat_color = 'f' # white
settings.log_level = 'INFO'
settings.console = True
settings.gui = False
|
twoolie/Minecraft-Tools
|
src/minecraft/conf.py
|
Python
|
mit
| 227
|
from . import queue_monitor
if __name__ == '__main__':
queue_monitor.main()
|
gabfl/redis-priority-queue
|
src/__main__.py
|
Python
|
mit
| 81
|
"""This is oldowan.genbank.parse."""
__all__ = ['parse_genbank']
from oldowan.genbank.constants import INCLUDE
from oldowan.genbank.utility import join_stripped_lines
import re
#
# PARSER
#
def parse_genbank(entry, include=INCLUDE):
"""Parse text of genbank entry into dictionary.
"""
include = set(include)
if not entry.startswith('LOCUS'):
raise TypeError('entry is not in genbank format')
hash = {}
if 'locus' in include:
loc_mo = RE_LOCUS.match(entry)
if loc_mo:
loc = loc_mo.group(0)
hash['locus'] = loc[12:28].strip()
hash['division'] = loc[64:67].strip()
hash['orientation'] = loc[55:63].strip()
hash['strandedness'] = loc[44:47].strip()
hash['nucleic_acid'] = loc[47:53].strip()
date_mo = RE_DATE.search(loc)
if date_mo:
hash['date'] = date_mo.group(0)
if 'definition' in include:
defn_mo = RE_DEFINITION.search(entry)
if defn_mo:
hash['definition'] = join_stripped_lines(defn_mo.group(1))
if 'version' in include:
vers_mo = RE_VERSION.search(entry)
if vers_mo:
hash['accession'] = vers_mo.group(1)
hash['version'] = vers_mo.group(2)
hash['gi'] = vers_mo.group(3)
if 'source' in include:
src_mo = RE_SOURCE.search(entry)
if src_mo:
hash['source'] = src_mo.group(1)
hash['organism'] = src_mo.group(2)
hash['taxonomy'] = RE_TAXONOMY_WHITESPACE.sub('; ', src_mo.group(3))
if 'features' in include:
fea_mo = RE_FEATURE.search(entry)
if fea_mo:
hash['features'] = extract_features(fea_mo.group(0))
elif 'source_feature_only' in include:
fea_mo = RE_FEATURE.search(entry)
if fea_mo:
hash['features'] = extract_source_feature(fea_mo.group(0))
if 'sequence' in include:
ori_mo = RE_SEQUENCE.search(entry)
if ori_mo:
ori = ori_mo.group(1)
# remove the first line from the origin string
frst_line_mo = RE_FIRST_LINE.match(ori)
ori = frst_line_mo.group(1)
# strip out numbers and spaces
ori = RE_NUMBERS_AND_WHITESPACE.sub('', ori)
hash['sequence'] = ori
else:
hash['sequence'] = ''
return hash
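# A hedged usage sketch (not part of the original module); 'my_record.gb' is a
# hypothetical file holding a single GenBank entry:
#
# with open('my_record.gb') as f:
#     record = parse_genbank(f.read())
# print(record['locus'], len(record['sequence']))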
#
# PRIVATE UTILITY FUNCTIONS USED IN PARSER
#
# These functions are implementation details and should not be used outside of
# this parser. There is no guarantee that any of these will be maintained or
# necessarily function the same as the parser evolves. The call signature and
# return values of the 'parse_genbank' function are the only supported public
# interface.
#
def extract_features(raw_features):
features = RE_FEATURE_SPLITTER.split(raw_features)
# first list entry after the split is the FEATURES header line
# throw it away
features.pop(0)
return [parse_feature(x) for x in features]
def extract_source_feature(raw_features):
    features = RE_FEATURE_SPLITTER.split(raw_features)
    # if there are any features at all, then after the split
    # index 0 will hold the FEATURES header line
    # and the first actual feature at index 1 will be the 'source' feature
    if len(features) > 1:
        return [parse_feature(features[1])]
    return None
def parse_feature(feature):
parsed = []
# split up the feature; sub-entry ids are prefixed by '/'
feature_entries = feature.split('/')
# the first sub-entry of any feature is its position
position_line = feature_entries.pop(0)
# NOTE this assumes a simple feature position
# will probably break on complex positions
# TODO make more robust to complex positions
position_entries = position_line.split()
feature_name = position_entries[0]
position = position_entries[1]
parsed.append(position)
# for all other sub-entries, split by '='
# with a maximum split of 1
entries = [x.split('=', 1) for x in feature_entries]
for x in entries:
if 1 == len(x):
parsed.append((clean_up(x[0]), True))
elif 2 == len(x):
parsed.append((clean_up(x[0]), clean_up(x[1])))
return (feature_name, parsed)
def clean_up(strng):
"""Strip excess whitespace and de-quote a Genbank feature value string."""
strng = strng.strip()
# remove indentation whitespace (if this is a multiline string)
strng = RE_FEATURE_INDENT.sub('', strng)
# remove opening and closing "s if present
quote_mo = RE_QUOTED.match(strng)
if quote_mo:
strng = quote_mo.group(1)
return strng
#
# REGULAR EXPRESSIONS USED IN PARSER
#
RE_LOCUS = re.compile(r"^LOCUS[^\r\n]*")
"""Match opening LOCUS line from full entry text."""
#: Match date in LOCUS line
RE_DATE = re.compile(r"\d\d-[A-Z][A-Z][A-Z]-\d\d\d\d")
#: Match DEFINITION line from full entry text
RE_DEFINITION = re.compile(r"[\r\n]DEFINITION(.*?)[\r\n]\S")
#: Match VERSION line from full entry text
RE_VERSION = re.compile(r"[\r\n]VERSION (.+?)\.(\d)\s+GI:(\d+)\s*?[\r\n]\S")
#: Match SOURCE line in main body from full entry text
RE_SOURCE = re.compile(r"[\r\n]SOURCE ([^\r\n]+)[\r\n] ORGANISM\s+([^\r\n]+)\s+(.*?)\S", re.S)
#: Match complete REFERENCE block from full entry text
#RE_REFERENCE = re.compile(r"[\r\n](REFERENCE.*?)[\r\n]\S", re.S)
#: Match complete COMMENT block from full entry text
#RE_COMMENT = re.compile(r"[\r\n](COMMENT.*?)[\r\n]\S", re.S)
#: Match complete FEATURE block from full entry text
RE_FEATURE = re.compile(r"[\r\n](FEATURES.*?)[\r\n]\S", re.S)
#: Match indent between feature entries in FEATURE block
RE_FEATURE_SPLITTER \
= re.compile(r"[\r\n] (?=\S)")
#: Match SEGMENT block from full entry text
#RE_SEGMENT = re.compile(r"[\r\n](SEGMENT.*?)[\r\n]\S")
#: Match CONTIG block from full entry text
#RE_CONTIG = re.compile(r"[\r\n](CONTIG.*?)[\r\n]\S")
#: Match ORIGIN block (contains the sequence) from full entry text
RE_SEQUENCE = re.compile(r"[\r\n](ORIGIN.*?)[\r\n]\S", re.S)
#: Match "quoted" text, capturing the inner text
RE_QUOTED = re.compile(r'^"(.*?)"$')
#: Match the 21 space indent in multiline feature entries
RE_FEATURE_INDENT \
= re.compile(r'[\r\n] {21}')
#: Match semi-colon with whitespace in taxonomy listing.
RE_TAXONOMY_WHITESPACE \
= re.compile(r";\s+")
#: Match a single line (used here to remove the ORIGIN line from the sequence)
RE_FIRST_LINE = re.compile(r"^[^\r\n]+(.*)", re.S)
#: Match numbers and whitespace (used here to clear out all formatting from
#: the sequence in the ORIGIN block)
RE_NUMBERS_AND_WHITESPACE \
= re.compile(r"[\d\s]")
|
ryanraaum/oldowan.genbank
|
oldowan/genbank/parse.py
|
Python
|
mit
| 6,831
|
"""These are the actual Sphinx directives we provide, but they are skeletal.
The real meat is in their parallel renderer classes, in renderers.py. The split
is due to the unfortunate trick we need here of having functions return the
directive classes after providing them the ``app`` symbol, where we store the
JSDoc output, via closure. The renderer classes, able to be top-level classes,
can access each other and collaborate.
"""
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import flag
from .renderers import AutoFunctionRenderer, AutoClassRenderer, AutoAttributeRenderer
class JsDirective(Directive):
"""Abstract directive which knows how to pull things out of JSDoc output"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'short-name': flag
}
def auto_function_directive_bound_to_app(app):
class AutoFunctionDirective(JsDirective):
"""js:autofunction directive, which spits out a js:function directive
Takes a single argument which is a JS function name combined with an
optional formal parameter list, all mashed together in a single string.
"""
def run(self):
return AutoFunctionRenderer.from_directive(self, app).rst_nodes()
return AutoFunctionDirective
def auto_class_directive_bound_to_app(app):
class AutoClassDirective(JsDirective):
"""js:autoclass directive, which spits out a js:class directive
Takes a single argument which is a JS class name combined with an
optional formal parameter list for the constructor, all mashed together
in a single string.
"""
option_spec = JsDirective.option_spec.copy()
option_spec.update({
'members': lambda members: ([m.strip() for m in members.split(',')]
if members else []),
'exclude-members': _members_to_exclude,
'private-members': flag})
def run(self):
return AutoClassRenderer.from_directive(self, app).rst_nodes()
return AutoClassDirective
def auto_attribute_directive_bound_to_app(app):
class AutoAttributeDirective(JsDirective):
"""js:autoattribute directive, which spits out a js:attribute directive
Takes a single argument which is a JS attribute name.
"""
def run(self):
return AutoAttributeRenderer.from_directive(self, app).rst_nodes()
return AutoAttributeDirective
def _members_to_exclude(arg):
"""Return a set of members to exclude given a comma-delim list them.
Exclude none if none are passed. This differs from autodocs' behavior,
which excludes all. That seemed useless to me.
"""
return set(a.strip() for a in (arg or '').split(','))
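# A minimal registration sketch, assuming a conventional Sphinx extension
# setup() hook; the directive names shown are illustrative and may not match
# the real sphinx_js package exactly:
#
# def setup(app):
#     app.add_directive_to_domain(
#         'js', 'autofunction', auto_function_directive_bound_to_app(app))
#     app.add_directive_to_domain(
#         'js', 'autoclass', auto_class_directive_bound_to_app(app))
#     app.add_directive_to_domain(
#         'js', 'autoattribute', auto_attribute_directive_bound_to_app(app))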
|
erikrose/sphinx-js
|
sphinx_js/directives.py
|
Python
|
mit
| 2,863
|
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
luckyday = models.DateField('lucky day', blank=True, null=True)
def __unicode__(self):
return u'Profile of user: %s' % self.user.username
def user_post_save(sender, instance, created, **kwargs):
"""
create a user profile when a new account is created
"""
    if created:
UserProfile.objects.create(user=instance)
post_save.connect(user_post_save, sender=User)
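# Design note (added): ForeignKey(User, unique=True) triggers a Django warning
# recommending OneToOneField, which also provides a direct reverse accessor.
# A hedged sketch of the equivalent declaration:
#
# class UserProfile(models.Model):
#     user = models.OneToOneField(User)  # reverse access: some_user.userprofile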
|
youtaya/mothertree
|
monthertree/muser/models.py
|
Python
|
mit
| 576
|
from common import *
import unittest
import run4v
run4v.VERBOSE = False
class TestVaspFile(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_createFile(self):
fileName = "TEST_VASPFILE"
vaspFile = run4v.VASPFile(fileName = fileName, autogen = False, verbose=True)
vaspFile.createFile()
self.assertTrue(vaspFile.exists())
self.assertTrue(vaspFile.rm())
def test_raw_content(self):
fileName = "TEST_VASPFILE_RAW_CONTENT"
raw_content = "Hello folks"
vaspFile = run4v.VASPFile(raw_content = raw_content, fileName = fileName, autogen = True, verbose=True)
fd = open(fileName)
self.assertTrue(vaspFile.exists())
self.assertTrue(raw_content in fd.read())
fd.close()
self.assertTrue(vaspFile.rm())
class TestIncar(TestVaspFile):
def setUp(self):
pass
def test_getContents(self):
settings = {'ISPIN':'2', 'IBRION':2, 'ENCUT' : '400'}
fileName = 'TEST_INCAR'
vaspFile = run4v.INCAR(settings, fileName = fileName)
vaspFile.createFile()
contents = vaspFile.getContents()
expectedContents = "ISPIN=2\nIBRION=2\nENCUT=400"
self.assertTrue(expectedContents in contents)
self.assertTrue(vaspFile.rm())
def test_getContents_with_false_value(self):
settings = {'ISPIN':'2', 'IBRION':2, 'ENCUT' :False}
fileName = 'TEST_INCAR_WITHOUT_ENCUT'
vaspFile = run4v.INCAR(settings, fileName = fileName)
vaspFile.createFile()
contents = vaspFile.getContents()
self.assertTrue("ENCUT" not in contents)
self.assertTrue(vaspFile.rm())
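# Entry-point sketch (an assumption; the repository may rely on an external
# test runner such as nose or pytest instead):
if __name__ == '__main__':
    unittest.main()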
|
alejandrogallo/run4v
|
run4v/tests/test_vaspfiles.py
|
Python
|
mit
| 1,712
|
# -*- coding: utf-8 -*-
#
# GopPy documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 16 15:40:24 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'numpydoc'
]
numpydoc_show_class_members = False
intersphinx_mapping = {
'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GopPy'
copyright = u'2014, Jan Gosmann'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GopPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GopPy.tex', u'GopPy Documentation',
u'Jan Gosmann', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'goppy', u'GopPy Documentation',
[u'Jan Gosmann'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GopPy', u'GopPy Documentation',
u'Jan Gosmann', 'GopPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
jgosmann/goppy
|
doc/conf.py
|
Python
|
mit
| 8,469
|
"""
Example of a manual_report() function that can be used either as a context manager
(with statement), or a decorator, to manually add entries to Locust's statistics.
Usage as a context manager:
with manual_report("stats entry name"):
# Run time of this block will be reported under a stats entry called "stats entry name"
# do stuff here, if an Exception is raised, it'll be reported as a failure
Usage as a decorator:
@task
@manual_report
def my_task(self):
# The run time of this task will be reported under a stats entry called "my task" (type "manual").
# If an Exception is raised, it'll be reported as a failure
"""
import random
from contextlib import contextmanager
from time import time, sleep
from locust import User, task, constant, events
@contextmanager
def _manual_report(name):
start_time = time()
try:
yield
except Exception as e:
events.request.fire(
request_type="manual",
name=name,
response_time=(time() - start_time) * 1000,
response_length=0,
exception=e,
)
raise
else:
events.request.fire(
request_type="manual",
name=name,
response_time=(time() - start_time) * 1000,
response_length=0,
exception=None,
)
def manual_report(name_or_func):
if callable(name_or_func):
# used as decorator without name argument specified
return _manual_report(name_or_func.__name__)(name_or_func)
else:
return _manual_report(name_or_func)
class MyUser(User):
wait_time = constant(1)
@task
def successful_task(self):
with manual_report("successful_task"):
sleep(random.random())
@task
@manual_report
def decorator_test(self):
if random.random() > 0.5:
raise Exception("decorator_task failed")
sleep(random.random())
@task
def failing_task(self):
with manual_report("failing_task"):
sleep(random.random())
raise Exception("Oh nooes!")
|
locustio/locust
|
examples/manual_stats_reporting.py
|
Python
|
mit
| 2,145
|
import h5py
class AuthException(Exception):
pass
class NotFoundException(Exception):
pass
class LoginError(Exception):
def __init__(self, message, redirect_url=None):
super(LoginError, self).__init__(message)
self.redirect_url = redirect_url
def validate_file(path: str) -> bool:
"""
Check if the file is an HDF5 file
Modify this to allow for other file types!
(Note some records can be text or arbitrary binary files but we don't need to validate them)
:param path:
:return:
"""
return h5py.is_hdf5(path)
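# A hedged sketch of the extension the docstring above invites: also accept a
# few text-based formats by file extension. The extension list and function
# name are illustrative, not part of the original module.
#
# import os
#
# def validate_file_lenient(path: str, allowed_exts=('.csv', '.txt')) -> bool:
#     return h5py.is_hdf5(path) or os.path.splitext(path)[1].lower() in allowed_exts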
|
BiRG/Omics-Dashboard
|
omics/omics_dashboard/data_tools/util.py
|
Python
|
mit
| 574
|
"""
<Program Name>
util.py
<Author>
Konstantin Andrianov
<Started>
March 24, 2012. Derived from original util.py written by Geremy Condra.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provides utility services. This module supplies utility functions such as:
get_file_details() that computes the length and hash of a file, import_json
that tries to import a working json module, load_json_* functions, etc.
"""
import json
import os
import logging
from securesystemslib import exceptions
from securesystemslib import formats
from securesystemslib.hash import digest_fileobject
from securesystemslib.storage import FilesystemBackend, StorageBackendInterface
from typing import Any, Dict, IO, List, Optional, Sequence, Tuple, Union
logger = logging.getLogger(__name__)
def get_file_details(
filepath: str,
hash_algorithms: List[str] = ['sha256'],
storage_backend: Optional[StorageBackendInterface] = None
) -> Tuple[int, Dict[str, str]]:
"""
<Purpose>
To get file's length and hash information. The hash is computed using the
sha256 algorithm. This function is used in the signerlib.py and updater.py
modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
A list of hash algorithms with which the file's hash should be computed.
Defaults to ['sha256']
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.StorageError: The file at "filepath" cannot be
opened or found.
<Returns>
A tuple (length, hashes) describing 'filepath'.
"""
# Making sure that the format of 'filepath' is a path string.
# 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
formats.PATH_SCHEMA.check_match(filepath)
formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
if storage_backend is None:
storage_backend = FilesystemBackend()
file_length = get_file_length(filepath, storage_backend)
file_hashes = get_file_hashes(filepath, hash_algorithms, storage_backend)
return file_length, file_hashes
def get_file_hashes(
filepath: str,
hash_algorithms: List[str] = ['sha256'],
storage_backend: Optional[StorageBackendInterface] = None
) -> Dict[str, str]:
"""
<Purpose>
Compute hash(es) of the file at filepath using each of the specified
hash algorithms. If no algorithms are specified, then the hash is
computed using the SHA-256 algorithm.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
A list of hash algorithms with which the file's hash should be computed.
Defaults to ['sha256']
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.StorageError: The file at "filepath" cannot be
opened or found.
<Returns>
A dictionary conforming to securesystemslib.formats.HASHDICT_SCHEMA
containing information about the hashes of the file at "filepath".
"""
# Making sure that the format of 'filepath' is a path string.
# 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
formats.PATH_SCHEMA.check_match(filepath)
formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
if storage_backend is None:
storage_backend = FilesystemBackend()
file_hashes = {}
with storage_backend.get(filepath) as fileobj:
# Obtaining hash of the file.
for algorithm in hash_algorithms:
digest_object = digest_fileobject(fileobj, algorithm)
file_hashes.update({algorithm: digest_object.hexdigest()})
# Performing a format check to ensure 'file_hash' corresponds HASHDICT_SCHEMA.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
formats.HASHDICT_SCHEMA.check_match(file_hashes)
return file_hashes
def get_file_length(
filepath: str,
storage_backend: Optional[StorageBackendInterface] = None
) -> int:
"""
<Purpose>
To get file's length information.
<Arguments>
filepath:
Absolute file path of a file.
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
<Exceptions>
securesystemslib.exceptions.StorageError: The file at "filepath" cannot be
opened or found.
<Returns>
The length, in bytes, of the file at 'filepath'.
"""
# Making sure that the format of 'filepath' is a path string.
# 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
formats.PATH_SCHEMA.check_match(filepath)
if storage_backend is None:
storage_backend = FilesystemBackend()
return storage_backend.getsize(filepath)
def persist_temp_file(
temp_file: IO,
persist_path: str,
storage_backend: Optional[StorageBackendInterface] = None,
should_close: bool = True
) -> None:
"""
<Purpose>
Copies 'temp_file' (a file like object) to a newly created non-temp file at
'persist_path'.
<Arguments>
temp_file:
File object to persist, typically a file object returned by one of the
interfaces in the tempfile module of the standard library.
persist_path:
File path to create the persistent file in.
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
should_close:
A boolean indicating whether the file should be closed after it has been
persisted. Default is True, the file is closed.
<Exceptions>
securesystemslib.exceptions.StorageError: If file cannot be written.
<Return>
None.
"""
if storage_backend is None:
storage_backend = FilesystemBackend()
storage_backend.put(temp_file, persist_path)
if should_close:
temp_file.close()
def ensure_parent_dir(
filename: str,
storage_backend: Optional[StorageBackendInterface] = None
) -> None:
"""
<Purpose>
    To ensure existence of the parent directory of 'filename'. If the parent
    directory of 'filename' does not exist, create it.
    Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
    exists, then the directory '/a/b/c/' will be created.
<Arguments>
filename:
A path string.
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filename' is improperly
formatted.
securesystemslib.exceptions.StorageError: When folder cannot be created.
<Side Effects>
A directory is created whenever the parent directory of 'filename' does not
exist.
<Return>
None.
"""
# Ensure 'filename' corresponds to 'PATH_SCHEMA'.
# Raise 'securesystemslib.exceptions.FormatError' on a mismatch.
formats.PATH_SCHEMA.check_match(filename)
if storage_backend is None:
storage_backend = FilesystemBackend()
# Split 'filename' into head and tail, check if head exists.
directory = os.path.split(filename)[0]
# Check for cases where filename is without directory like 'file.txt'
# and as a result directory is an empty string
if directory:
storage_backend.create_folder(directory)
def file_in_confined_directories(
filepath: str,
confined_directories: Sequence[str]
) -> bool:
"""
<Purpose>
Check if the directory containing 'filepath' is in the list/tuple of
'confined_directories'.
<Arguments>
filepath:
A string representing the path of a file. The following example path
strings are viewed as files and not directories: 'a/b/c', 'a/b/c.txt'.
confined_directories:
A sequence (such as list, or tuple) of directory strings.
<Exceptions>
securesystemslib.exceptions.FormatError: On incorrect format of the input.
<Return>
    Boolean. True, if 'confined_directories' contains the empty string (no
    confinement) or the directory containing 'filepath'; False, otherwise.
"""
# Do the arguments have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
formats.PATH_SCHEMA.check_match(filepath)
formats.NAMES_SCHEMA.check_match(confined_directories)
for confined_directory in confined_directories:
# The empty string (arbitrarily chosen) signifies the client is confined
# to all directories and subdirectories. No need to check 'filepath'.
if confined_directory == '':
return True
# Normalized paths needed, to account for up-level references, etc.
# callers have the option of setting the list of directories in
# 'confined_directories'.
filepath = os.path.normpath(filepath)
confined_directory = os.path.normpath(confined_directory)
# A caller may restrict himself to specific directories on the
# remote repository. The list of paths in 'confined_path', not including
# each path's subdirectories, are the only directories the client will
# download targets from.
if os.path.dirname(filepath) == confined_directory:
return True
return False
def load_json_string(data: Union[str, bytes]) -> Any:
"""
<Purpose>
Deserialize 'data' (JSON string) to a Python object.
<Arguments>
data:
A JSON string.
<Exceptions>
securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
Python object.
<Side Effects>
None.
<Returns>
Deserialized object. For example, a dictionary.
"""
deserialized_object = None
try:
deserialized_object = json.loads(data)
except TypeError:
message = 'Invalid JSON string: ' + repr(data)
raise exceptions.Error(message)
except ValueError:
message = 'Cannot deserialize to a Python object: ' + repr(data)
raise exceptions.Error(message)
else:
return deserialized_object
def load_json_file(
filepath: str,
storage_backend: Optional[StorageBackendInterface] = None
) -> Any:
"""
<Purpose>
Deserialize a JSON object from a file containing the object.
<Arguments>
filepath:
Absolute path of JSON file.
storage_backend:
An object which implements
securesystemslib.storage.StorageBackendInterface. When no object is
passed a FilesystemBackend will be instantiated and used.
<Exceptions>
securesystemslib.exceptions.FormatError: If 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized to
a Python object.
securesystemslib.exceptions.StorageError: If file cannot be loaded.
IOError in case of runtime IO exceptions.
<Side Effects>
None.
<Return>
Deserialized object. For example, a dictionary.
"""
# Making sure that the format of 'filepath' is a path string.
# securesystemslib.exceptions.FormatError is raised on incorrect format.
formats.PATH_SCHEMA.check_match(filepath)
if storage_backend is None:
storage_backend = FilesystemBackend()
deserialized_object = None
with storage_backend.get(filepath) as file_obj:
raw_data = file_obj.read().decode('utf-8')
try:
deserialized_object = json.loads(raw_data)
except (ValueError, TypeError):
raise exceptions.Error('Cannot deserialize to a'
' Python object: ' + filepath)
else:
return deserialized_object
def digests_are_equal(digest1: str, digest2: str) -> bool:
"""
<Purpose>
While protecting against timing attacks, compare the hexadecimal arguments
and determine if they are equal.
<Arguments>
digest1:
The first hexadecimal string value to compare.
digest2:
The second hexadecimal string value to compare.
<Exceptions>
securesystemslib.exceptions.FormatError: If the arguments are improperly
formatted.
<Side Effects>
None.
<Return>
Return True if 'digest1' is equal to 'digest2', False otherwise.
"""
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
formats.HEX_SCHEMA.check_match(digest1)
formats.HEX_SCHEMA.check_match(digest2)
if len(digest1) != len(digest2):
return False
are_equal = True
for element in range(len(digest1)):
if digest1[element] != digest2[element]:
are_equal = False
return are_equal
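# Usage note (added): the loop above deliberately avoids short-circuiting so
# that comparison time does not leak the position of the first differing
# character. The standard library provides the same guarantee via
# hmac.compare_digest; a sketch, not part of this module's API:
#
# import hmac
# assert digests_are_equal('deadbeef', 'deadbeef')
# assert hmac.compare_digest('deadbeef', 'deadbeef')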
|
secure-systems-lab/securesystemslib
|
securesystemslib/util.py
|
Python
|
mit
| 12,971
|