repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k)
|---|---|---|---|---|---|---|---|---|
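The rows below follow that schema. As a rough, hypothetical sketch (not part of the dump itself), rows with these columns could be iterated with the Hugging Face datasets library; the dataset path "your/fim-dataset" and the split name are placeholders, and only the column names above are taken from the header.
from datasets import load_dataset

ds = load_dataset("your/fim-dataset", split="train")  # placeholder dataset path
for row in ds.select(range(3)):
    # Reassemble the original file from the fill-in-the-middle (prefix/middle/suffix) split.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(source))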
gandrewstone/yadog | PyHtmlGen/json.py | Python | gpl-3.0 | 2,038 | 0.034838
from htmldoc import *
from chunk import *
from module import *
from js import *
#? This global variable is used to specify where the javascript files are on the server.
JavascriptDir = "./"
#<_ view="internal">A Marker showing where the RPCs are installed</_>
rpcim = Marker("js")
rpcs=["""
<script language='JavaScript'>
var server = {};
// RPC calls are installed here
""", rpcim,"// End RPC call installation.\n</script>\n"]
#? The json module is defined
jsonModule = Module("json",jsm,[("head",["<script language='JavaScript' src='%sjson2.js'>1;</script>\n<script language='JavaScript' src='%sjsonreq.js'>1;</script>\n" % (JavascriptDir,JavascriptDir)] + rpcs) ])
class DefineRpc:
def __init__(self,rpcname):
self.name = rpcname
def call(self,*jsargs):
args = ",".join([str(x) for x in jsargs])
return "server.%s(%s)" % (self.name,args)
def gen(self,doc):
doc.AddModule(jsModule,LocationBoth)
doc.AddModule(jsonModule,LocationBoth)
doc.Insert("InstallFunction(server, '%s');\n" % self.name,rpcim)
def actionDynGet(element,uri):
eid = None
try: # If it's a chunk then use its id.
eid = element.getId()
except: # Otherwise assume that the user is passing the id in
eid = str(element)
return "ReplaceChildrenWithUri('%s','%s');" % (eid,str(uri))
def actionDynGetScript(element,uri,js):
eid = None
try: # If it's a chunk then use its id.
eid = element.getId()
except: # Otherwise assume that the user is passing the id in
eid = str(element)
return "ReplaceChildrenWithUri('%s','%s'); LoadScript('%s','%s');" % (eid,str(uri),eid + "script", js)
#<example>
def Test():
import gen
from attribute import *
cdlt = Chunk("Click for Dynamic load test")
replaceme = Chunk("this will be replaced")
action(cdlt,"onClick",actionDynGet(replaceme,"testjsondyn.html"))
rpc = DefineRpc("rpctest")
b1 = Chunk("RPC")
action(b1,"onClick",rpc.call("'arg1'",5))
d = [cdlt,replaceme,rpc,b1]
gen.WriteFile("testjson.html",d)
#</example>
|
commtrack/commtrack-core | apps/django_tables/tests/test_templates.py | Python | bsd-3-clause | 5,249 | 0.004585
"""Test template specific functionality.
Make sure tables expose their functionality to templates right. This
is generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
class BookTable(tables.Table):
id = tables.Column()
name = tables.Column()
books = BookTable([
{'id': 1, 'name': 'Foo: Bar'},
])
# cast to a string to get a value ready to be passed to the querystring
books.order_by = ('name',)
assert str(books.order_by) == 'name'
books.order_by = ('name', '-id')
assert str(books.order_by) == 'name,-id'
def test_columns_and_rows():
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn(sortable=False)
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
{'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
assert len(list(countries.columns)) == 4
assert len(list(countries.rows)) == len(list(countries)) == 4
# column name override, hidden columns
assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
# verbose_name, and fallback to field name
assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
# data yielded by each row matches the defined columns
for row in countries.rows:
assert len(list(row)) == len(list(countries.columns))
# we can access each column and row by name...
assert countries.columns['population'].column.verbose_name == "Population Size"
assert countries.columns['cc'].column.verbose_name == "Phone Ext."
# ...even invisible ones
assert countries.columns['tld'].column.verbose_name == "Domain"
# ...and even inaccessible ones (but accessible to the coder)
assert countries.columns['currency'].column == countries.base_columns['currency']
# this also works for rows
for row in countries:
row['tld'], row['cc'], row['population']
# certain data is available on columns
assert countries.columns['currency'].sortable == True
assert countries.columns['capital'].sortable == False
assert countries.columns['name'].visible == True
assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tables.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
currency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc "
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
add_to_builtins('django_tables.app.templatetags.tables')
# [bug] set url param tag handles an order_by tuple with multiple columns
class MyTable(tables.Table):
f1 = tables.Column()
f2 = tables.Column()
t = Template('{% set_url_param x=table.order_by %}')
table = MyTable([], order_by=('f1', 'f2'))
assert t.render({'request': HttpRequest(), 'table': table}) == '?x=f1%2Cf2'
|
chubbymaggie/datasketch | benchmark/lshensemble_benchmark.py | Python | mit | 10,558 | 0.004262
"""
Benchmark dataset from:
https://github.com/ekzhu/set-similarity-search-benchmark.
Use "Canada US and UK Open Data":
Indexed sets: canada_us_uk_opendata.inp.gz
Query sets (10 stratified samples from 10 percentile intervals):
Size from 10 - 1k: canada_us_uk_opendata_queries_1k.inp.gz
Size from 10 - 10k: canada_us_uk_opendata_queries_10k.inp.gz
Size from 10 - 100k: canada_us_uk_opendata_queries_100k.inp.gz
"""
import time, argparse, sys, json
import numpy as np
import scipy.stats
import random
import collections
import gzip
import random
import os
import pickle
import pandas as pd
from SetSimilaritySearch import SearchIndex
import farmhash
from datasketch import MinHashLSHEnsemble, MinHash
def _hash_32(d):
return farmhash.hash32(d)
def bootstrap_sets(sets_file, sample_ratio, num_perms, skip=1,
pad_for_asym=False):
print("Creating sets...")
sets = collections.deque([])
random.seed(41)
with gzip.open(sets_file, "rt") as f:
for i, line in enumerate(f):
if i < skip:
# Skip lines
continue
if random.random() > sample_ratio:
continue
s = np.array([int(d) for d in \
line.strip().split("\t")[1].split(",")])
sets.append(s)
sys.stdout.write("\rRead {} sets".format(len(sets)))
sys.stdout.write("\n")
sets = list(sets)
keys = list(range(len(sets)))
# Generate paddings for asym.
max_size = max(len(s) for s in sets)
paddings = dict()
if pad_for_asym:
padding_sizes = sorted(list(set([max_size-len(s) for s in sets])))
for num_perm in num_perms:
paddings[num_perm] = dict()
for i, padding_size in enumerate(padding_sizes):
if i == 0:
prev_size = 0
pad = MinHash(num_perm, hashfunc=_hash_32)
else:
prev_size = padding_sizes[i-1]
pad = paddings[num_perm][prev_size].copy()
for w in range(prev_size, padding_size):
pad.update(str(w)+"_tmZZRe8DE23s")
paddings[num_perm][padding_size] = pad
# Generate minhash
print("Creating MinHash...")
minhashes = dict()
for num_perm in num_perms:
print("Using num_parm = {}".format(num_perm))
ms = []
for s in sets:
m = MinHash(num_perm, hashfunc=_hash_32)
for word in s:
m.update(str(word))
if pad_for_asym:
# Add padding to the minhash
m.merge(paddings[num_perm][max_size-len(s)])
ms.append(m)
sys.stdout.write("\rMinhashed {} sets".format(len(ms)))
sys.stdout.write("\n")
minhashes[num_perm] = ms
return (minhashes, sets, keys)
def benchmark_lshensemble(threshold, num_perm, num_part, m, storage_config,
index_data, query_data):
print("Building LSH Ensemble index")
(minhashes, indexed_sets, keys) = index_data
lsh = MinHashLSHEnsemble(threshold=threshold, num_perm=num_perm,
num_part=num_part, m=m, storage_config=storage_config)
lsh.index((key, minhash, len(s))
for key, minhash, s in \
zip(keys, minhashes[num_perm], indexed_sets))
print("Querying")
(minhashes, sets, keys) = query_data
probe_times = []
process_times = []
results = []
for qs, minhash in zip(sets, minhashes[num_perm]):
# Record probing time
start = time.perf_counter()
result = list(lsh.query(minhash, len(qs)))
probe_times.append(time.perf_counter() - start)
# Record post processing time.
start = time.perf_counter()
[_compute_containment(qs, indexed_sets[key]) for key in result]
process_times.append(time.perf_counter() - start)
results.append(result)
sys.stdout.write("\rQueried {} sets".format(len(results)))
sys.stdout.write("\n")
return results, probe_times, process_times
def benchmark_ground_truth(threshold, index, query_data):
(_, query_sets, _) = query_data
times = []
results = []
for q in query_sets:
start = time.perf_counter()
result = [key for key, _ in index.query(q)]
duration = time.perf_counter() - start
times.append(duration)
results.append(result)
sys.stdout.write("\rQueried {} sets".format(len(results)))
sys.stdout.write("\n")
return results, times
def _compute_containment(x, y):
if len(x) == 0 or len(y) == 0:
return 0.0
intersection = len(np.intersect1d(x, y, assume_unique=True))
return float(intersection) / float(len(x))
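# Worked example added for illustration (not in the original benchmark script):
# _compute_containment(x, y) returns |x intersect y| / |x|, the fraction of the query
# set x that is covered by the indexed set y. For instance:
#   _compute_containment(np.array([1, 2, 3]), np.array([2, 3, 4, 5]))  # -> 2/3 ~ 0.667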
levels = {
"test": {
"thresholds": [1.0,],
"num_parts": [4,],
"num_perms": [32,],
"m": 2,
},
"lite": {
"thresholds": [0.5, 0.75, 1.0],
"num_parts": [8, 16],
"num_perms": [32, 64],
"m": 8,
},
"medium": {
"thresholds": [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"num_parts": [8, 16, 32],
"num_perms": [32, 128, 224],
"m": 8,
},
"complete": {
"thresholds": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"num_parts": [8, 16, 32],
"num_perms": [32, 64, 96, 128, 160, 192, 224, 256],
"m": 8,
},
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run LSH Ensemble benchmark using data sets obtained "
"from https://github.com/ekzhu/set-similarity-search-benchmarks.")
parser.add_argument("--indexed-sets", type=str, required=True,
help="Input indexed set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-sets", type=str, required=True,
help="Input query set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-results", type=str,
default="lshensemble_benchmark_query_results.csv")
parser.add_argument("--ground-truth-results", type=str,
default="lshensemble_benchmark_ground_truth_results.csv")
parser.add_argument("--indexed-sets-sample-ratio", type=float, default=0.1)
parser.add_argument("--level", type=str, choices=levels.keys(),
default="complete")
parser.add_argument("--skip-ground-truth
|
", action="store_true")
parser.add_argument("--use-asym-minhash", action="store_true")
parser.add_argument("--use-redis", action="store_true")
parser.add_argument("--redis-host", type=str, default="localhost")
parser.add_argument("--redis-port", type=int, default=6379)
args = parser.parse_args(sys.argv[1:])
level = levels[args.level]
index_data, query_data = None, None
index_data_cache = "{}.pickle".format(args.indexed_sets)
query_data_cache = "{}.pickle".format(args.query_sets)
if os.path.exists(index_data_cache):
print("Using cached indexed sets {}".format(index_data_cache))
with open(index_data_cache, "rb") as d:
index_data = pickle.load(d)
else:
print("Using indexed sets {}".format(args.indexed_sets))
index_data = bootstrap_sets(args.indexed_sets,
args.indexed_sets_sample_ratio, num_perms=level["num_perms"],
pad_for_asym=args.use_asym_minhash)
with open(index_data_cache, "wb") as d:
pickle.dump(index_data, d)
if os.path.exists(query_data_cache):
print("Using cached query sets {}".format(query_data_cache))
with open(query_data_cache, "rb") as d:
query_data = pickle.load(d)
else:
print("Using query sets {}".format(args.query_sets))
query_data = bootstrap_sets(args.query_sets, 1.0,
num_perms=level["num_perms"], skip=0)
with open(query_data_cache, "wb") as d:
pickle.dump(query_data, d)
if not args.skip_ground_truth:
rows = []
# Build search index separately, only works for containm
|
DayGitH/Python-Challenges | DailyProgrammer/DP20120709A.py | Python | mit | 2,709 | 0.005537
"""
The Fibonacci numbers, which we are all familiar with, start like this:
0,1,1,2,3,5,8,13,21,34,...
Where each new number in the sequence is the sum of the previous two.
It turns out that by summing different Fibonacci numbers with each other, you can create every single positive integer.
In fact, a much stronger statement holds:
Every single positive integer can be represented in one and only one way as a sum of non-consecutive Fibonacci numbers.
This is called the number's "Zeckendorf representation" [http://en.wikipedia.org/wiki/Zeckendorf%27s_theorem].
For instance, the Zeckendorf representation of the number 100 is 89 + 8 + 3, and the Zeckendorf representation of 1234
is 987 + 233 + 13 + 1. Note that all these numbers are Fibonacci numbers, and that they are non-consecutive (i.e. no
two numbers in a Zeckendorf representation can be next to each other in the Fibonacci sequence).
There are other ways of summing Fibonacci numbers to get these numbers. For instance, 100 is also equal to 89 + 5 + 3 +
2 + 1, but 1, 2, 3, 5 are all consecutive Fibonacci numbers. If no consecutive Fibonacci numbers are allowed, the
representation is unique.
Finding the Zeckendorf representation is actually not very hard. Let's use the number 100 as an example of how it's done:
First, you find the largest Fibonacci number less than or equal to 100. In this case that is 89. This number will always
be part of the representation, so we remember that number and proceed recursively, and figure out the representation of
100 - 89 = 11.
The largest Fibonacci number less than or equal to 11 is 8. We remember that number and proceed recursively with
11 - 8 = 3.
3 is a Fibonacci number itself, so now we're done. The answer is 89 + 8 + 3.
Write a program that finds the Zeckendorf representation of different numbers.
What is the Zeckendorf representation of 315 ?
Thanks to SwimmingPastaDevil for suggesting this problem in /r/dailyprogrammer_ideas! Do you have a problem you
think would be good for us? Why not head over there and post it?
"""
def zeckendorf(target, fib_list):
res = []
for f in fib_list[::-1]:
if f <= target:
res.append(f)
target -= f
return res
def get_fibonacci_list(target):
""" returns fibonacci numbers upto less than the target and not including zero"""
fib = [1, 1]
while fib[-1] < target:
fib.append(fib[-1] + fib[-2])
return fib[:-1]
def main():
target = 3**15
fib_list = get_fibonacci_list(target)
zeck = zeckendorf(target, fib_list)
print(zeck)
print(' 3**15 = {} \nsum of zeckendorf = {}'.format(3**15, sum(zeck)))
if __name__ == "__main__":
main()
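# Illustrative note (not part of the original solution): applying the greedy procedure
# described in the docstring to the number asked about in the prompt gives
# 315 = 233 + 55 + 21 + 5 + 1, and the helpers above reproduce it:
#   zeckendorf(315, get_fibonacci_list(315))  # -> [233, 55, 21, 5, 1]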
|
thomasbilk/django-filer | filer/fields/file.py | Python | bsd-3-clause | 5,490 | 0.002186
#-*- coding: utf-8 -*-
import inspect
from django import forms
from django.conf import settings as globalsettings
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from filer.utils.compatibility import truncate_words
from filer.models import File
from filer import settings as filer_settings
import logging
logger = logging.getLogger(__name__)
class AdminFileWidget(ForeignKeyRawIdWidget):
choices = None
def render(self, name, value, attrs=None):
obj = self.obj_for_value(value)
css_id = attrs.get('id', 'id_image_x')
css_id_thumbnail_img = "%s_thumbnail_img" % css_id
css_id_description_txt = "%s_description_txt" % css_id
related_url = None
if value:
try:
file_obj = File.objects.get(pk=value)
related_url = file_obj.logical_folder.\
get_admin_directory_listing_url_path()
except Exception,e:
# catch the exception and manage it: we can re-raise it for debugging
# purposes and/or just log it, provided the user has configured
# proper logging
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while rendering file widget: %s',e)
if filer_settings.FILER_DEBUG:
raise e
if not related_url:
related_url = reverse('admin:filer-directory_listing-last')
params = self.url_parameters()
if params:
lookup_url = '?' + '&'.join(
['%s=%s' % (k, v) for k, v in params.items()])
else:
lookup_url = ''
if not 'class' in attrs:
# The JavaScript looks for this hook.
attrs['class'] = 'vForeignKeyRawIdAdminField'
# rendering the super for ForeignKeyRawIdWidget on purpose here because
# we only need the input and none of the other stuff that
# ForeignKeyRawIdWidget adds
hidden_input = super(ForeignKeyRawIdWidget, self).render(
name, value, attrs)
filer_static_prefix = filer_settings.FILER_STATICMEDIA_PREFIX
if not filer_static_prefix[-1] == '/':
filer_static_prefix += '/'
context = {
'hidden_input': hidden_input,
'lookup_url': '%s%s' % (related_url, lookup_url),
'thumb_id': css_id_thumbnail_img,
'span_id': css_id_description_txt,
'object': obj,
'lookup_name': name,
'filer_static_prefix': filer_static_prefix,
'clear_id': '%s_clear' % css_id,
'id': css_id,
}
html = render_to_string('admin/filer/widgets/admin_file.html', context)
return mark_safe(html)
def label_for_value(self, value):
obj = self.obj_for_value(value)
return ' <strong>%s</strong>' % truncate_words(obj, 14)
def obj_for_value(self, value):
try:
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
except:
obj = None
return obj
class Media:
js = (filer_settings.FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)
class AdminFileFormField(forms.ModelChoiceField):
widget = AdminFileWidget
def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
self.rel = rel
self.queryset = queryset
self.to_field_name = to_field_name
self.max_value = None
self.min_value = None
other_widget = kwargs.pop('widget', None)
if 'admin_site' in inspect.getargspec(self.widget.__init__)[0]: # Django 1.4
widget_instance = self.widget(rel, site)
else: # Django <= 1.3
widget_instance = self.widget(rel)
forms.Field.__init__(self, widget=widget_instance, *args, **kwargs)
def widget_attrs(self, widget):
widget.required = self.required
return {}
class FilerFileField(models.ForeignKey):
default_form_class = AdminFileFormField
default_model_class = File
def __init__(self, **kwargs):
# we call ForeignKey.__init__ with the Image model as parameter...
# a FilerImageFiled can only be a ForeignKey to a Image
return super(FilerFileField, self).__init__(
self.default_model_class, **kwargs)
def formfield(self, **kwargs):
# This is a fairly standard way to set up some defaults
# while letting the caller override them.
defaults = {
'form_class': self.default_form_class,
'rel': self.rel,
}
defaults.update(kwargs)
return super(FilerFileField, self).formfield(**defaults)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.related.ForeignKey"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
|
0x00ach/zer0m0n | signatures/sniffer_winpcap.py | Python | gpl-3.0 | 1,246 | 0.00321
# Copyright (C) 2012 Thomas "stacks" Birn (@stacksth)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class InstallsWinpcap(Signature):
name = "sniffer_winpcap"
description = "Installs WinPCAP"
severity = 3
categories = ["sniffer"]
authors = ["Thomas Birn", "n
|
ex"]
minimum = "0.5"
def run(self):
indicators = [
".*\\\\packet\.dll$",
".*\\\\npf\.sys$",
".*\\\\wpcap\.dll$"
]
for indicator in indicators:
if self.check_file(pattern=indicator, regex=True):
return True
return False
|
cantino/newspaper | newspaper/urls.py | Python | mit | 9,141 | 0.004376
# -*- coding: utf-8 -*-
"""
Newspaper treats urls for news articles as critical components.
Hence, we have an entire module dedicated to them.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import logging
import re
from urlparse import (
urlparse, urljoin, urlsplit, urlunsplit, parse_qs)
from .packages.tldextract import tldextract
log = logging.getLogger(__name__)
MAX_FILE_MEMO = 20000
GOOD_PATHS = ['story', 'article', 'feature', 'featured', 'slides',
'slideshow', 'gallery', 'news', 'video', 'media',
'v', 'radio', 'press']
BAD_CHUNKS = ['careers', 'contact', 'about', 'faq', 'terms', 'privacy',
'advert', 'preferences', 'feedback', 'info', 'browse', 'howto',
'account', 'subscribe', 'donate', 'shop', 'admin']
BAD_DOMAINS = ['amazon', 'doubleclick', 'twitter']
def remove_args(url, keep_params=(), frags=False):
"""
Remove all param arguments from a url.
"""
parsed = urlsplit(url)
filtered_query= '&'.join(
qry_item for qry_item in parsed.query.split('&')
if qry_item.startswith(keep_params)
)
if frags:
frag = parsed[4:]
else:
frag = ('',)
return urlunsplit(parsed[:3] + (filtered_query,) + frag)
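# Hypothetical usage note (added for illustration): with the default keep_params=(),
# every query argument is stripped, e.g.
#   remove_args('http://example.com/story?utm_source=x&ref=y')  # -> 'http://example.com/story'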
def redirect_back(url, source_domain):
"""
Some sites like Pinterest have APIs that cause news
urls to redirect to their site with the real news url as a
GET param. This method catches that and returns our param.
"""
parse_data = urlparse(url)
domain = parse_data.netloc
query = parse_data.query
# If our url is even from a remotely similar domain or
# sub domain, we don't need to redirect.
if source_domain in domain or domain in source_domain:
return url
query_item = parse_qs(query)
if query_item.get('url'):
# log.debug('caught redirect %s into %s' % (url, query_item['url'][0]))
return query_item['url'][0]
return url
def prepare_url(url, source_url=None):
"""
Operations that purify a url, removes arguments,
redirects, and merges relatives with absolutes.
"""
try:
if source_url is not None:
source_domain = urlparse(source_url).netloc
proper_url = urljoin(source_url, url)
proper_url = redirect_back(proper_url, source_domain)
proper_url = remove_args(proper_url)
else:
proper_url = remove_args(url)
except ValueError, e:
log.critical('url %s failed on err %s' % (url, str(e)))
# print 'url %s failed on err %s' % (url, str(e))
proper_url = u''
return proper_url
def valid_url(url, verbose=False, test=False):
"""
Perform a regex check on an absolute url.
First, perform a few basic checks like making sure the format of the url
is right, (scheme, domain, tld).
Second, make sure that the url isn't some static resource, check the
file type.
Then, search for a YYYY/MM/DD pattern in the url. News sites
love to use this pattern, this is a very safe bet.
Separators can be [\.-/_]. Years can be 2 or 4 digits, must
have proper digits 1900-2099. Months and days can be
ambiguous 2 digit numbers, one is even optional, some sites are
liberal with their formatting also matches snippets of GET
queries with keywords inside them. ex: asdf.php?topic_id=blahlbah
We permit alphanumeric, _ and -.
Our next check makes sure that a keyword is within one of the
separators in a url (subdomain or early path separator).
cnn.com/story/blah-blah-blah would pass due to "story".
We filter out articles in this stage by aggressively checking to
see if any resemblance of the source domain's name or tld is
present within the article title. If it is, that's bad. It must
be a company link, like 'cnn is hiring new interns'.
We also filter out articles with a subdomain or first degree path
on a registered bad keyword.
"""
DATE_REGEX = r'([\./\-_]{0,1}(19|20)\d{2})[\./\-_]{0,1}(([0-3]{0,1}[0-9][\./\-_])|(\w{3,5}[\./\-_]))([0-3]{0,1}[0-9][\./\-]{0,1})?'
ALLOWED_TYPES = ['html', 'htm', 'md', 'rst'] # TODO add more!
# if we are testing this method in the testing suite, we actually
# need to preprocess the url like we do in the article's constructor!
if test:
url = prepare_url(url)
# 11 chars is shortest valid url length, eg: http://x.co
if url is None or len(url) < 11:
if verbose: print '\t%s rejected because len of url is less than 11' % url
return False
r1 = ('mailto:' in url) # TODO not sure if these rules are redundant
r2 = ('http://' not in url) and ('https://' not in url)
if r1 or r2:
if verbose: print '\t%s rejected because len of url structure' % url
return False
path = urlparse(url).path
# input url is not in valid form (scheme, netloc, tld)
if not path.startswith('/'):
return False
# the '/' which may exist at the end of the url provides us no information
if path.endswith('/'):
path = path[:-1]
# '/story/cnn/blahblah/index.html' --> ['story', 'cnn', 'blahblah', 'index.html']
path_chunks = [x for x in path.split('/') if len(x) > 0]
# siphon out the file type. eg: .html, .htm, .md
if len(path_chunks) > 0:
last_chunk = path_chunks[-1].split('.') # last chunk == file usually
file_type = last_chunk[-1] if len(last_chunk) >= 2 else None
# if the file type is a media type, reject instantly
if file_type and file_type not in ALLOWED_TYPES:
if verbose: print '\t%s rejected due to bad filetype' % url
return False
# the file type is not of use to us anymore, remove it from the url
if len(last_chunk) > 1:
path_chunks[-1] = last_chunk[-2]
# Index gives us no information
if 'index' in path_chunks:
path_chunks.remove('index')
# extract the tld (top level domain)
tld_dat = tldextract.extract(url)
subd = tld_dat.subdomain
tld = tld_dat.domain.lower()
url_slug = path_chunks[-1] if path_chunks else u''
if tld in BAD_DOMAINS:
if verbose: print '%s caught for a bad tld' % url
return False
if len(path_chunks) == 0:
dash_count, underscore_count = 0, 0
else:
dash_count = url_slug.count('-')
underscore_count = url_slug.count('_')
# If the url has a news slug title
if url_slug and (dash_count > 4 or underscore_count > 4):
if dash_count >= underscore_count:
if tld not in [ x.lower() for x in url_slug.split('-') ]:
if verbose: print '%s verified for being a slug' % url
return True
if underscore_count > dash_count:
if tld not in [ x.lower() for x in url_slug.split('_') ]:
if verbose: print '%s verified for being a slug' % url
return True
# There must be at least 2 subpaths
if len(path_chunks) <= 1:
if verbose: print '%s caught for path chunks too small' % url
return False
# Check for subdomain & path red flags
# Eg: http://cnn.com/careers.html or careers.cnn.com --> BAD
for b in BAD_CHUNKS:
if b in path_chunks or b == subd:
if verbose: print '%s caught for bad chunks' % url
return False
match_date = re.search(DATE_REGEX, url)
# if we caught the verified date above, it's an article
if match_date is not None:
if verbose: print '%s verified for date' % url
return True
if verbose: print '%s caught for default false' % url
return False
def get_domain(abs_url, **kwargs):
"""
returns a url's domain, this method exists to
encapsulate all url code into this file
"""
if abs_url is None:
return None
return urlparse(abs_url, **kwargs).netloc
def get_scheme(abs_url, **kwargs):
"""
"""
if abs_url is None:
return None
return urlparse(abs_url, **kwargs).scheme
def get_path(abs_url, **kwargs):
"""
"""
if
|
wolfiex/DSMACC-testing | zgraph.py | Python | gpl-3.0 | 2,805 | 0.023173
import networkx as nx
import numpy as np
import pandas as pd
def normalise(x):
x = x[:]#deepcopy error
x -= min(x)
x /= max(x)
return x
def jgraph(posjac):
'''
networkx graph object from posjac at timestep
'''
posjac = 1 - normalise(np.log10(posjac).replace([np.inf,-np.inf],np.nan).dropna())
split = [i.split('->') for i in posjac.index]
#graph
G = nx.DiGraph()
for e in range(len(split)):
G.add_edge(split[e][0],split[e][1],weight=posjac[e])
G.remove_edges_from(G.selfloop_edges())
return G
def getnx(self, ts ,save=False):
'''
Create a networkx graph from a DSMACC new class
Usage:
getnx(a,a.ts[-1], 'propane')
'''
self.create_posjac()
G = nx.DiGraph()
posjac = self.posjac.loc[ts,:]
split = [i.split('->') for i in posjac.index]
for e in range(len(split)):
G.add_edge(split[e][0],split[e][1],weight=posjac[e])
G.remove_edges_from(G.selfloop_edges())
if save:
nx.write_weighted_edgelist(G, save+'.wedgelist')
#G=nx.read_weighted_edgelist('propane.wedgelist',create_using=nx.DiGraph)
return G
def pagerank(a):
return geobj2df(metric(tograph(group_hour(a.jacsp))))
def tograph(jac):
'''
Use hourly avg
'''
rt = []
for t in jac.iterrows():
jacsp=t[1]
#inverse negative links
index = np.array(jacsp.index)
lt = list(jacsp<0)
index[lt] = map(lambda x: '->'.join(reversed(x.split('->'))),index[lt])
jacsp.index = index
jacsp = jacsp.abs()
#normalize jacsp
jacsp = jacsp*1.01 - jacsp.min().min()
jacsp /= jacsp.max().max()
split = [i.split('->') for i in jacsp.index]
#graph
G = nx.DiGraph()
for e in range(len(split)):
G.add_edge(split[e][0],split[e][1],weight=jacsp[e])
G.remove_edges_from(G.selfloop_edges())
rt.append({'graph':G,'time':t[0]})
return rt
def metric(GS,met = 'nx.pagerank'):
'''
GS - out array from to_graph
'''
metfn = eval(met)
for gt in range(len(GS)):
res = metfn(GS[gt]['graph'])
res = [[key, res[key]] for key, value in sorted(res.iteritems(), key=lambda k,v: (v,k))]
GS[gt][met] = res
return GS
def geobj2df(GS,what = 'nx.pagerank'):
res = []
index = []
for s in GS:
index.append(s['time'])
s = pd.DataFrame(s[what])
s.index = s[0]
s=s[1]
res.append(s)
df = pd.concat(res,axis = 1).T
df.index = index
df = (df*1.1).subtract(df.min(axis=0))
df=df.divide(df.max(axis=1),axis=0)
import zcreate_centrality as p
#p.createhtml(df)
return df
|
wpoely86/easybuild-easyblocks | easybuild/easyblocks/q/quantumespresso.py | Python | gpl-2.0 | 15,419 | 0.003178
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def __init__(self, *args, **kwargs):
"""Add extra config options specific to Quantum ESPRESSO."""
super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.install_subdir = "espresso-%s" % self.version
def patch_step(self):
"""Patch files from build dir (not start dir)."""
super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
# also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
env.setvar('FCCPP', "%s -E" % os.getenv('CC'))
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
# always include -w to suppress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
val = os.getenv('LIB%s' % lib)
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub(r"\$\(MPIF90\) \$\(F90FLAGS
|
\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
wantdir = os.path.join(self.builddir, wantdirs[0])
make_sys_in_path = None
cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
for path in cand_paths:
full_path = os.path.join(wantdir, path)
if os.path.exists(full_path):
make_sys_in_path = full_path
break
if make_sys_in_path is None:
raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
wantdir, ', '.join(cand_paths))
|
Magicked/crits | crits/dashboards/views.py | Python | mit | 14,127 | 0.00446
import json
from django.contrib.auth.decorators import user_passes_test
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render
from crits.core.user_tools import user_can_view_data
from crits.core.handlers import generate_global_search
from crits.dashboards.dashboard import Dashboard
from crits.dashboards.handlers import (
toggleTableVisibility,
get_saved_searches_list,get_dashboard,
clear_dashboard,
save_data,
get_table_data,
generate_search_for_saved_table,
delete_table,
getRecordsForDefaultDashboardTable,
switch_existing_search_to_dashboard,
add_existing_search_to_dashboard,
renameDashboard,
changeTheme,
deleteDashboard,
getDashboardsForUser,
createNewDashboard,
setDefaultDashboard,
cloneDashboard,
setPublic,
updateChildren
)
@user_passes_test(user_can_view_data)
def saved_searches_list(request):
"""
Renders the saved_searches_list html
"""
args = get_saved_searches_list(request.user)
return render(request, 'saved_searches_list.html', args)
@user_passes_test(user_can_view_data)
def dashboard(request, dashId=None):
"""
renders the dashboard
"""
args = get_dashboard(request.user,dashId)
if not args["success"]:
return respondWithError(args['message'], request=request)
return render(request, 'dashboard.html', args)
@user_passes_test(user_can_view_data)
def new_save_search(request):
"""
Renders the initial search results on save_search.html.
Called only from crits core
"""
args = generate_global_search(request)
if 'Result' in args and args['Result'] == "ERROR":
return respondWithError(args['Message'], request=request)
args['dashboards'] = getDashboardsForUser(request.user)
return render(request, "save_search.html", args)
@user_passes_test(user_can_view_data)
def edit_save_search(request, id):
"""
Called when editing a saved table on the dashboard.
Renders the saved_search.html with the customized table
"""
args = generate_search_for_saved_table(user=request.user, id=id, request=request)
if 'Result' in args and args['Result'] == "ERROR":
return respondWithError(args['Message'], request=request)
args['dashboards'] = getDashboardsForUser(request.user)
return render(request, "save_search.html", args)
@user_passes_test(user_can_view_data)
def delete_save_search(request):
"""
Called via ajax to delete a table. Only called from the saved_search.html
"""
id = request.GET.get("id", None)
if not id:
return respondWithError("Saved search cannot be found."\
" Please refresh and try again", True)
response = delete_table(request.user.id, id)
return httpResponse(response)
@user_passes_test(user_can_view_data)
def load_data(request, obj):
"""
Ajax call to load the data for the table.
"""
sortBy = request.GET.get("sortBy", 'null')
pageNumber = request.GET.get("pageNumber", 1)
maxRows = request.GET.get("maxRows", 25)
if sortBy == 'null':
sortBy = {}
else:
sortBy = json.loads(sortBy)
return get_table_data(request, obj, sort=sortBy, pageNumber=pageNumber, maxRows=maxRows)
@user_passes_test(user_can_view_data)
def save_search(request):
"""
Ajax call to save the table. Only called from the saved_search.html
"""
dashId = request.GET.get('dashId', None)
newDashName = request.GET.get('newDashName', None)
tableId = request.GET.get("tableId", None)
errorMessage = None
clone = False
try:
if newDashName:
newDash = createNewDashboard(request.user.id, newDashName)
if not newDash:
raise(Exception, "Dashboard already exists")
dashboard = newDash
elif dashId:
dashboard = Dashboard.objects(id=dashId).first()
if dashboard.isPublic and dashboard.analystId != request.user.id:
newDash = cloneDashboard(request.user.id, dashboard, cloneSearches = True, skip=tableId)
dashboard = newDash
clone = True
newDashName = newDash.name
elif dashboard.isPublic:
updateChildren(dashboard.id)
else:
errorMessage = "Error finding dashboard. Please refresh and try again."
except Exception as e:
print e
errorMessage = "You already have a dashboard with th
|
at name."
if errorMessage:
return respondWithError(errorMessage, True)
userId = request.GET.get('userId', None)
tableName = request.GET.get('tableName', None)
searchTerm = request.GET.get('query', None)
objType = request.GET.get('object_type', None)
columns = json.loads(request.GET.get("columns", ""))
sortBy = request.GET.get("sortBy", None)
isDefault = request.GET.get("isDefaultOnDashboard", "False")
sizex = request.GET.get("sizex", None)
maxRows = request.GET.get("maxRows", None)
if isDefault.lower() == "true":
isDefault = True
else:
isDefault = False
if sortBy:
sortBy = json.loads(sortBy)
response = save_data(userId, columns, tableName, searchTerm, objType, sortBy,
tableId, sizex=sizex, isDefaultOnDashboard=isDefault,
maxRows=maxRows,
dashboard=dashboard, clone=clone)
if newDashName:
response["newDashId"] = str(newDash.id)
response["newDashName"] = newDash.name
response["isClone"] = clone
response["newDashUrl"] = reverse("crits-dashboards-views-dashboard",
kwargs={"dashId":newDash.id})
return httpResponse(response)
@user_passes_test(user_can_view_data)
def save_new_dashboard(request):
"""
Ajax call to save the dashboard and the positioning and width of the
tables on it. Called from the dashboard.html
"""
data = json.loads(request.POST.get('data', ''))
userId = request.POST.get('userId', None)
dashId = request.POST.get('dashId', None)
user = request.user
clone = False
if not dashId:
return respondWithError("Error finding dashboard. Please refresh and try again.", True)
else:
dashboard = Dashboard.objects(id=dashId).first()
if dashboard.isPublic and dashboard.analystId != user.id:
dashboard = cloneDashboard(userId, dashboard)
if not dashboard:
return respondWithError("You already have a dashboard with that name.", True)
clone = True
if not user.defaultDashboard:
setDefaultDashboard(user, dashboard.id)
elif dashboard.isPublic:
updateChildren(dashboard.id)
for table in data:
isDefault = False
if table['isDefault'].lower() == "true":
isDefault = True
sortBy = None
if 'sortDirection' in table and 'sortField' in table:
sortBy = {'field':table['sortField'],'direction':table['sortDirection']}
response = save_data(userId, table['columns'], table['tableName'],
tableId=table['id'], isDefaultOnDashboard=isDefault,
sortBy=sortBy, dashboard=dashboard,
clone=clone, row=table['row'], grid_col=table['col'],
sizex=table['sizex'], sizey=table['sizey'])
if not response['success']:
return httpResponse(response)
return httpResponse({"success":True,
"clone":clone,
"dashId": str(dashboard.id),
"message":"Dashboard saved successfully!"})
@user_passes_test(user_can_view_data)
def get_dashboard_table_data(request, tableName):
"""
Ajax call to get the records for a default dashboard table.
Only called from the saved_search.html when editing the table
"""
response = getRecordsForDefaultDashboardTable(request.user, tableName)
return httpRespo
|
rdo-management/heat | heat/api/cfn/v1/signal.py | Python | apache-2.0 | 2,047 | 0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.api.aws import exception
from heat.common import identifier
from heat.common import wsgi
from heat.rpc import client as rpc_client
class SignalController(object):
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
def update_waitcondition(self, req, body, arn):
con = req.context
identity = identifier.ResourceIdentifier.from_arn(arn)
try:
md = self.rpc_client.resource_signal(
con,
stack_identity=dict(identity.stack()),
resource_name=identity.resource_name,
details=body,
sync_call=True)
except Exception as ex:
return exception.map_remote_error(ex)
return {'resource': identity.resource_name, 'metadata': md}
def signal(self, req, arn, body=None):
con = req.context
identity = identifier.ResourceIdentifier.from_arn(arn)
try:
self.rpc_client.resource_signal(
con,
stack_identity=dict(identity.stack()),
resource_name=identity.resource_name,
details=body)
except Exception as ex:
return exception.map_remote_error(ex)
def create_resource(options):
"""
Signal resource factory method.
"""
deserializer = wsgi.JSONRequestDeserializer()
return wsgi.Resource(SignalController(options), deserializer)
|
sdpython/ensae_teaching_cs | _todo/pvalues/pvalues_sigma.py | Python | mit | 2,697 | 0.071932
import numpy, matplotlib, random, pylab, math
def matrix_square_root(sigma) :
eigen, vect = numpy.linalg.eig(sigma)
dim = len(sigma)
res = numpy.identity(dim)
for i in range(0,dim) :
res[i,i] = eigen[i]**0.5
return vect * res * vect.transpose()
def chi2_level (alpha = 0.95) :
N = 1000
x = [ random.gauss(0,1) for _ in range(0,N) ]
y = [ random.gauss(0,1) for _ in range(0,N) ]
r = map ( lambda c : (c[0]**2+c[1]**2)**0.5, zip(x,y))
r = list(r)
r.sort()
res = r [ int (alpha * N) ]
return res
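# Context note (added, not in the original script): chi2_level estimates by simulation
# the radius r such that P(X**2 + Y**2 <= r**2) = alpha for independent standard
# normals, i.e. the square root of the chi-squared (2 dof) quantile. Assuming scipy
# is available, a rough analytic cross-check is:
#   math.sqrt(scipy.stats.chi2.ppf(0.95, df=2))  # ~ 2.45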
def square_figure(mat, a) :
x = [ ]
y = [ ]
for i in range (0,100) :
x.append ( a * mat[0][0]**0.5 )
y.append ( (random.random ()-0.5) * a * mat[1][1]**0.5*2 )
x.append ( -a * mat[0][0]**0.5 )
y.append ( (random.random ()-0.5) * a * mat[1][1]**0.5*2 )
y.append ( a * mat[1][1]**0.5 )
x.append ( (random.random ()-0.5) * a * mat[0][0]**0.5*2 )
y.append ( -a * mat[1][1]**0.5 )
x.append ( (random.random ()-0.5) * a * mat[0][0]**0.5*2 )
pylab.plot(x,y, 'ro')
x = [ ]
y = [ ]
for i in range (0,100) :
x.append ( a )
y.append ( (random.random ()-0.5) * a*2 )
x.append ( -a )
y.append ( (random.random ()-0.5) * a*2 )
y.append ( a )
x.append ( (random.random ()-0.5) * a*2 )
y.append ( -a )
x.append ( (random.random ()-0.5) * a*2 )
xs,ys = [],[]
for a,b in zip (x,y) :
ar = numpy.matrix( [ [a], [b] ] ).transpose()
we = ar * root
xs.append ( we [0,0] )
ys.append ( we [0,1] )
pylab.plot(xs,ys, 'bo')
pylab.show()
def circle_figure (mat, a) :
x = [ ]
y = [ ]
for i in range (0,200) :
z = random.random() * math.pi * 2
i = a * mat[0][0]**0.5 * math.cos(z)
j = a * mat[0][0]**0.5 * math.sin(z)
x.append ( i )
y.append ( j )
pylab.plot(x,y, 'ro')
x = [ ]
y = [ ]
for i in range (0,200) :
z = random.random() * math.pi * 2
i = a * math.cos(z)
j = a * math.sin(z)
x.append ( i )
y.append ( j )
xs,ys = [],[]
for a,b in zip (x,y) :
ar = numpy.matrix( [ [a], [b] ] ).transpose()
we = ar * root
xs.append ( we [0,0] )
ys.append ( we [0,1] )
pylab.plot(xs,ys, 'bo')
pylab.show()
if __name__ == "__main__" :
level = chi2_level ()
mat = [ [0.1, 0.05], [0.05, 0.2] ]
npmat = numpy.matrix(mat)
root = matrix_square_root (npmat)
square_figure (mat, 1.96)
circle_figure (mat, level)
|
cactusbin/nyt | matplotlib/examples/statistics/errorbar_demo.py | Python | unlicense | 200 | 0.005
"""
Demo of the errorbar function.
"""
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
plt.errorbar(x, y, xerr=0.2, yerr=0.4)
plt.show()
|
naitoh/py2rb | tests/strings/split.py | Python | mit | 266 | 0.003759
s="the quick brown fox jumped over the lazy dog"
t = s.split(" ")
for v in t:
print(v)
r = s.split("e")
for v in r:
print(v)
x = s.split()
for v in x:
print(v)
# 2-arg version of split not supported
# y = s.split(" ",7)
# for v in y:
# print v
|
gpndata/cattle | tests/integration/cattletest/core/test_cluster.py | Python | apache-2.0 | 6,493 | 0
from common_fixtures import * # NOQA
def _clean_clusterhostmap_for_host(host):
for cluster in host.clusters():
cluster.removehost(hostId=str(host.id))
def _resource_is_inactive(resource):
return resource.state == 'inactive'
def _resource_is_active(resource):
return resource.state == 'active'
@pytest.mark.skipif('True')
def test_cluster_add_remove_host_actions(super_client, new_context):
host1 = super_client.reload(new_context.host)
account = new_context.project
_clean_clusterhostmap_for_host(host1)
cluster = super_client.create_cluster(
accountId=account.id,
name='testcluster1', port=9000)
cluster = wait_for_condition(
super_client, cluster, _resource_is_inactive,
lambda x: 'State is: ' + x.state)
# Add one host to cluster
cluster = cluster.addhost(hostId=str(host1.id))
cluster = wait_for_condition(
super_client, cluster,
lambda x: len(x.hosts()) == 1,
lambda x: 'Number of hosts in cluster is: ' + len(x.hosts()))
assert cluster.hosts()[0].id == host1.id
assert len(host1.clusters()) == 1
assert host1.clusters()[0].id == cluster.id
# activate cluster
cluster.activate()
cluster = wait_for_condition(
super_client, cluster, _resource_is_active,
lambda x: 'State is: ' + x.state)
# verify that the agent got created
uri = 'sim:///?clusterId={}&managingHostId={}'. \
format(get_plain_id(super_client, cluster),
get_plain_id(super_client, host1))
agents = super_client.list_agent(uri=uri)
assert len(agents) == 1
# verify that the agent instance got created
agent_instances = super_client.list_instance(agentId=agents[0].id)
assert len(agent_instances) == 1
try:
cluster.addhost(hostId=str(host1.id))
assert False
except cattle.ApiError as e:
assert e.error.code == 'InvalidReference'
cluster = cluster.removehost(hostId=str(host1.id))
cluster = wait_for_condition(
super_client, cluster,
lambda x: len(x.hosts()) == 0,
lambda x: 'Number of hosts in cluster is: ' + len(x.hosts()))
try:
cluster = cluster.removehost(hostId=str(host1.id))
assert False
except cattle.ApiError as e:
assert e.error.code == 'InvalidReference'
cluster = cluster.addhost(hostId=str(host1.id))
assert len(cluster.hosts()) == 1
# Add 2nd host to cluster
host2 = register_simulated_host(new_context)
cluster = cluster.addhost(hostId=str(host2.id))
cluster = wait_for_condition(
super_client, cluster,
lambda x: len(x.hosts()) == 2,
lambda x: 'Number of hosts in cluster is: ' + len(x.hosts()))
# Remove 2nd host from cluster
cluster = cluster.removehost(hostId=str(host2.id))
cluster = wait_for_condition(
super_client, cluster,
lambda x: len(x.hosts()) == 1,
lambda x: len(x.hosts()))
# temporarily skipping since this was inadvertently deleting the
# real host causing downstream TFs
@pytest.mark.skipif('True')
def test_host_purge(super_client, new_context):
host1 = super_client.reload(new_context.host)
_clean_clusterhostmap_for_host(host1)
cluster = super_client.create_cluster(
accountId=new_context.project.id,
name='testcluster2', port=9000)
cluster = wait_for_condition(
super_client, cluster, _resource_is_inactive,
lambda x: 'State is: ' + x.state)
cluster = cluster.addhost(hostId=str(host1.id))
host1 = super_client.wait_success(host1.deactivate())
host1 = super_client.wait_success(super_client.delete(host1))
super_client.wait_success(host1.purge())
wait_for_condition(
super_client, cluster, lambda x: len(x.hosts()) == 0)
@pytest.mark.skipif('True')
def test_cluster_purge(super_client, new_context):
host1 = super_client.reload(new_context.host)
_clean_clusterhostmap_for_host(host1)
cluster = super_client.create_cluster(
accountId=new_context.project.id,
name='testcluster3', port=9000)
cluster = wait_for_condition(
super_client, cluster, _resource_is_inactive,
lambda x: 'State is: ' + x.state)
cluster = cluster.addhost(hostId=str(host1.id))
cluster = wait_for_condition(
super_client, cluster, lambda x: len(x.hosts()) == 1)
cluster.activate()
cluster = wait_for_condition(
super_client, cluster, _resource_is_active,
lambda x: 'State is: ' + x.state)
# verify that the agent got created
uri = 'sim:///?clusterId={}&managingHostId={}'. \
format(get_plain_id(super_client, cluster),
get_plain_id(super_client, host1))
agents = super_client.list_agent(uri=uri)
assert len(agents) == 1
# verify that the agent instance got created
agentId = agents[0].id
agent_instances = super_client.list_instance(agentId=agentId)
assert len(agent_instances) == 1
# deactivate, remove, and purge cluster
cluster = super_client.wait_success(cluster.deactivate())
cluster = super_client.wait_success(super_client.delete(cluster))
cluster = super_client.wait_success(cluster.purge())
# check no hosts is registered to this cluster
wait_for_condition(
super_client, cluster, lambda x: len(x.hosts()) == 0)
# verify that the agent is removed
agents = super_client.list_agent(uri=uri)
wait_for_condition(
super_client, agents[0],
lambda x: x.state == 'removed',
lambda x: 'State is: ' + x.state)
# verify that the agent instance is removed as well
agent_instances = super_client.list_instance(agentId=agentId)
wait_for_condition(
super_client, agent_instances[0],
lambda x: x.state == 'removed',
lambda x: 'State is: ' + x.state)
@pytest.mark.skipif('True')
def test_cluster_actions_invalid_host_ref(super_client, new_context):
host1 = super_client.reload(new_context.host)
_clean_clusterhostmap_for_host(host1)
cluster = super_client.create_cluster(
accountId=new_context.project.id,
name='testcluster4', port=9000)
try:
cluster.addhost(hostId='badvalue')
assert False
except cattle.ApiError as e:
assert e.error.code == 'InvalidReference'
try:
cluster.removehost(hostId='badvalue')
assert False
except cattle.ApiError as e:
assert e.error.code == 'InvalidReference'
|
drawquest/drawquest-web | website/canvas/migrations/0021_auto__chg_field_comment_parent_content__chg_field_comment_reply_conten.py | Python | bsd-3-clause | 9,191 | 0.007399
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Renaming column for 'Comment.parent_content' to match new field type.
db.rename_column('canvas_comment', 'parent_content', 'parent_content_id')
# Changing field 'Comment.parent_content'
db.alter_column('canvas_comment', 'parent_content_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas.Content']))
# Renaming column for 'Comment.reply_content' to match new field type.
db.rename_column('canvas_comment', 'reply_content', 'reply_content_id')
# Changing field 'Comment.reply_content'
db.alter_column('canvas_comment', 'reply_content_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['canvas.Content']))
def backwards(self, orm):
# Renaming column for 'Comment.parent_content' to match new field type.
db.rename_column('canvas_comment', 'parent_content_id', 'parent_content')
# Changing field 'Comment.parent_content'
db.alter_column('canvas_comment', 'parent_content', self.gf('django.db.models.fields.CharField')(max_length=32))
# Renaming column for 'Comment.reply_content' to match new field type.
db.rename_column('canvas_comment', 'reply_content_id', 'reply_content')
# Changing field 'Comment.reply_content'
db.alter_column('canvas_comment', 'reply_content', self.gf('django.db.models.fields.CharField')(max_length=32))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['canvas.Content']"}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.contentsticker': {
'Meta': {'object_name': 'ContentSticker'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('django.db.models.fields.FloatField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.hashtag': {
'Meta': {'object_name': 'Hashtag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'canvas.post': {
'Meta': {'object_name': 'Post'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'post_id': ('django.db.models.fields.IntegerField', [], {}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['canvas.Thread']"}),
'thumb_down': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumb_up': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Thread']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canv
|
stackforge/cloudkitty
|
cloudkitty/rating/pyscripts/datamodels/script.py
|
Python
|
apache-2.0
| 1,854
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from wsme import types as wtypes
from cloudkitty.api.v1 import types as ck_types
class Script(wtypes.Base):
"""Type describing a script.
"""
script_id = wtypes.wsattr(ck_types.UuidType(), mandatory=False)
"""UUID of the script."""
name = wtypes.wsattr(wtypes.text, mandatory=True)
"""Name of the script."""
data = wtypes.wsattr(wtypes.text, mandatory=False)
"""Data of the script."""
checksum = wtypes.wsattr(wtypes.text, mandatory=False, readonly=True)
"""Checksum of the script data."""
@classmethod
def sample(cls):
sample = cls(script_id='bc05108d-f515-4984-8077-de319cbf35aa',
name='policy1',
data='return 0',
checksum='cf83e1357eefb8bdf1542850d66d8007d620e4050b5715d'
'c83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec'
'2f63b931bd47417a81a538327af927da3e')
return sample
class ScriptCollection(wtypes.Base):
"""Type describing a list of scripts.
"""
scripts = [Script]
"""List of scripts."""
@classmethod
def sample(cls):
sample = Script.sample()
return cls(scripts=[sample])
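# Illustrative usage (editor's addition, not part of the original module): wsme
# Base types take their attributes as keyword arguments, so an API handler could
# build a response roughly like this (the field values are made up):
#
# collection = ScriptCollection(scripts=[Script(name='policy1', data='return 0')])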
|
laroque/couchdb-python3
|
couchdb/tests/testutil.py
|
Python
|
bsd-3-clause
| 1,378
| 0.000726
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import random
import sys
from couchdb import client
from couchdb import ServerError
class TempDatabaseMixin(object):
temp_dbs = None
_db = None
def setUp(self):
self.server = client.Server(full_commit=False)
def tearDown(self):
if self.temp_dbs:
for name in self.temp_dbs:
try:
self.server.delete(name)
except ServerError as err:
if err.args[0] == (500, ('error', 'eacces')):
continue
raise
def temp_db(self):
if self.temp_dbs is None:
self.temp_dbs = {}
# Find an unused database name
while True:
name = 'couchdb-python/%d' % random.randint(0, sys.maxsize)
if name not in self.temp_dbs:
break
db = self.server.create(name)
self.temp_dbs[name] = db
return name, db
def del_db(self, name):
del self.temp_dbs[name]
self.server.delete(name)
@property
def db(self):
if self._db is None:
name, self._db = self.temp_db()
return self._db
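# Illustrative sketch (editor's addition, not in the original file): a test case
# would typically mix TempDatabaseMixin into unittest.TestCase and use self.db,
# which lazily creates a throw-away database that tearDown() deletes again.
#
# import unittest
#
# class DocumentRoundtripTestCase(TempDatabaseMixin, unittest.TestCase):
#     def test_roundtrip(self):
#         doc_id, _rev = self.db.save({'type': 'test'})
#         self.assertTrue(doc_id in self.db)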
|
adlr/naclports
|
build_tools/naclports.py
|
Python
|
bsd-3-clause
| 7,198
| 0.010975
|
#!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library for manipulating naclports packages in python.
This library can be used to build tools for working with naclports
packages. For example, it is used by 'update_mirror.py' to iterate
through all packages and mirror them on commondatastorage.
"""
import optparse
import os
import urlparse
import shlex
import shutil
import subprocess
import sys
import tempfile
import sha1check
MIRROR_URL = 'http://commondatastorage.googleapis.com/nativeclient-mirror/nacl'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACLPORTS_ROOT = os.path.dirname(SCRIPT_DIR)
OUT_DIR = os.path.join(NACLPORTS_ROOT, 'out')
ARCH = os.environ.get('NACL_ARCH', 'i686')
BUILD_ROOT = os.path.join(OUT_DIR, 'repository')
ARCHIVE_ROOT = os.path.join(OUT_DIR, 'tarballs')
NACL_SDK_ROOT = os.environ.get('NACL_SDK_ROOT')
# TODO(sbc): use this code to replace the bash logic in build_tools/common.sh
class Error(Exception):
pass
class Package(object):
"""Representation of a single naclports package.
Package objects correspond to folders on disk which
contain a 'pkg_info' file.
"""
def __init__(self, pkg_root):
self.root = os.path.abspath(pkg_root)
info = os.path.join(pkg_root, 'pkg_info')
keys = []
self.URL_FILENAME = None
self.URL = None
self.LICENSE = None
if not os.path.exists(info):
raise Error('Invalid package folder: %s' % pkg_root)
with open(info) as f:
for i, line in enumerate(f):
if line[0] == '#':
continue
if '=' not in line:
raise Error('Invalid pkg_info line %d: %s' % (i + 1, pkg_root))
key, value = line.split('=', 1)
key = key.strip()
value = shlex.split(value.strip())[0]
keys.append(key)
setattr(self, key, value)
assert 'PACKAGE_NAME' in keys
def GetBasename(self):
basename = os.path.splitext(self.GetArchiveFilename())[0]
if basename.endswith('.tar'):
basename = os.path.splitext(basename)[0]
return basename
def __cmp__(self, other):
return cmp(self.PACKAGE_NAME, other.PACKAGE_NAME)
def GetBuildLocation(self):
package_dir = getattr(self, 'PACKAGE_DIR', self.PACKAGE_NAME)
return os.path.join(BUILD_ROOT, package_dir)
def GetArchiveFilename(self):
if self.URL_FILENAME:
return self.URL_FILENAME
elif self.URL:
return os.path.basename(urlparse.urlparse(self.URL)[2])
def DownloadLocation(self):
archive = self.GetArchiveFilename()
if not archive:
return
return os.path.join(ARCHIVE_ROOT, archive)
def Verify(self, verbose=False):
if not self.GetArchiveFilename():
print "no archive: %s" % self.PACKAGE_NAME
return True
self.Download()
olddir = os.getcwd()
sha1file = os.path.join(self.root, self.PACKAGE_NAME + '.sha1')
try:
os.chdir(ARCHIVE_ROOT)
with open(sha1file) as f:
try:
filenames = sha1check.VerifyFile(f, False)
print "verified: %s" % (filenames)
except sha1check.Error as e:
print "verification failed: %s: %s" % (sha1file, str(e))
return False
finally:
os.chdir(olddir)
return True
def Extract(self):
self.ExtractInto(BUILD_ROOT)
def ExtractInto(self, output_path):
"""Extract the package archive into the given location.
This method assumes the package has already been downloaded.
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
new_foldername = os.path.dirname(self.GetBuildLocation())
if os.path.exists(os.path.join(output_path, new_foldername)):
return
tmp_output_path = tempfile.mkdtemp(dir=OUT_DIR)
try:
archive = self.DownloadLocation()
ext = os.path.splitext(archive)[1]
if ext in ('.gz', '.tgz', '.bz2'):
cmd = ['tar', 'xf', archive, '-C', tmp_output_path]
elif ext in ('.zip',):
cmd = ['unzip', '-q', '-d', tmp_output_path, archive]
else:
raise Error('unhandled extension: %s' % ext)
print cmd
subprocess.check_call(cmd)
src = os.path.join(tmp_output_path, new_foldername)
dest = os.path.join(output_path, new_foldername)
os.rename(src, dest)
finally:
shutil.rmtree(tmp_output_path)
def GetMirrorURL(self):
return MIRROR_URL + '/' + self.GetArchiveFilename()
def Enabled(self):
if hasattr(self, 'LIBC'):
if os.environ.get('NACL_GLIBC') == '1':
if self.LIBC != 'glibc':
raise Error('Package cannot be built with glibc.')
else:
if self.LIBC != 'newlib':
raise Error('Package cannot be built with newlib.')
if hasattr(self, 'DISABLED_ARCH'):
arch = os.environ.get('NACL_ARCH', 'x86_64')
if arch == self.DISABLED_ARCH:
raise Error('Package is disabled for current arch: %s.' % arch)
if hasattr(self, 'BUILD_OS'):
sys.path.append(os.path.join(NACL_SDK_ROOT, 'tools'))
import getos
if getos.GetPlatform() != self.BUILD_OS:
raise Error('Package can only be built on: %s.' % self.BUILD_OS)
def Download(self):
filename = self.DownloadLocation()
if not filename or os.path.exists(filename):
return
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
try:
mirror = self.GetMirrorURL()
print 'Downloading: %s [%s]' % (mirror, filename)
cmd = ['wget', '-O', filename, mirror]
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print 'Downloading: %s [%s]' % (self.URL, filename)
cmd = ['wget', '-O', filename, self.URL]
subprocess.check_call(cmd)
def PackageIterator(folders=None):
"""Iterator which yield a Package object for each
naclport package."""
if not folders:
folders = [os.path.join(NACLPORTS_ROOT, 'ports')]
for folder in folders:
for root, dirs, files in os.walk(folder):
if 'pkg_info' in files:
yield Package(root)
def main(args):
try:
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action='store_true',
help='Output extra information.')
parser.add_option('-C', dest='dirname', default='.',
help='Change directory before executing commands.')
options, args = parser.parse_args(args)
if not args:
parser.error("You must specify a build command")
if len(args) > 1:
parser.error("More than one command specified")
command = args[0]
if not options.dirname:
options.dirname = '.'
if not NACL_SDK_ROOT:
Error("$NACL_SDK_ROOT not set")
p = Package(options.dirname)
if command == 'download':
p.Download()
elif command == 'check':
pass # simply check that the package is valid.
elif command == 'enabled':
p.Enabled()
elif command == 'verify':
p.Verify()
except Error as e:
sys.stderr.write('naclports: %s\n' % e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
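# Illustrative example (editor's addition, not part of the original script): the
# pkg_info files parsed by Package() are plain KEY=value lines, with PACKAGE_NAME
# mandatory; a minimal one might look like this (the values are made up):
#
#   PACKAGE_NAME=zlib
#   URL=http://example.com/zlib-1.2.8.tar.gz
#   LICENSE=zlib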
|
frdb194/ubuntu-tweak
|
ubuntutweak/janitor/chrome_plugin.py
|
Python
|
gpl-2.0
| 377
| 0.002653
|
from ubuntutweak.janitor import JanitorCachePlugin
class ChromeCachePlugin(JanitorCachePlugin):
__title__ = _('Chrome Cache')
__category__ = 'application'
root_path = '~/.cache/google-chrome/Default'
class ChromiumCachePlugin(JanitorCachePlugin):
__title__ = _('Chromium Cache')
__category__ = 'application'
root_path = '~/.cache/chromium/Default'
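# Illustrative sketch (editor's addition, not part of the original plugin): other
# browser caches would follow the same pattern; only the title and root_path
# change. The class and path below are assumptions, not a real Ubuntu Tweak plugin.
#
# class OperaCachePlugin(JanitorCachePlugin):
#     __title__ = _('Opera Cache')
#     __category__ = 'application'
#     root_path = '~/.cache/opera'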
|
chrisseto/osf.io
|
scripts/refresh_addon_tokens.py
|
Python
|
apache-2.0
| 3,392
| 0.003833
|
#!/usr/bin/env python
# encoding: utf-8
import logging
import math
import time
from django.utils import timezone
import django
from modularodm import Q
from oauthlib.oauth2 import OAuth2Error
from dateutil.relativedelta import relativedelta
django.setup()
from framework.celery_tasks import app as celery_app
from scripts import utils as scripts_utils
from website.app import init_app
from addons.box.models import Provider as Box
from addons.googledrive.models import GoogleDriveProvider
from addons.mendeley.models import Mendeley
from osf.models import ExternalAccount
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
PROVIDER_CLASSES = (Box, GoogleDriveProvider, Mendeley, )
def look_up_provider(addon_short_name):
for Provider in PROVIDER_CLASSES:
if Provider.short_name == addon_short_name:
return Provider
return None
def get_targets(delta, addon_short_name):
# NOTE: expires_at is the access_token's expiration date,
# NOT the refresh token's
return ExternalAccount.find(
Q('expires_at', 'lt', timezone.now() - delta) &
Q('date_last_refreshed', 'lt', timezone.now() - delta) &
Q('provider', 'eq', addon_short_name)
)
def main(delta, Provider, rate_limit, dry_run):
allowance = rate_limit[0]
last_call = time.time()
for record in get_targets(delta, Provider.short_name):
if Provider(record).has_expired_credentials:
logger.info(
'Found expired record {}, skipping'.format(record.__repr__())
)
continue
logger.info(
'Refreshing tokens on record {0}; expires at {1}'.format(
record.__repr__(),
record.expires_at.strftime('%c')
)
)
if not dry_run:
if allowance < 1:
try:
time.sleep(rate_limit[1] - (time.time() - last_call))
except (ValueError, IOError):
pass # Value/IOError indicates negative sleep time in Py 3.5/2.7, respectively
allowance = rate_limit[0]
allowance -= 1
last_call = time.time()
success = False
try:
success = Provider(record).refresh_oauth_key(force=True)
except OAuth2Error as e:
logger.error(e)
else:
logger.info(
'Status of record {}: {}'.format(
record.__repr__(),
'SUCCESS' if success else 'FAILURE')
)
@celery_app.task(name='scripts.refresh_addon_tokens')
def run_main(addons=None, rate_limit=(5, 1), dry_run=True):
"""
:param dict addons: of form {'<addon_short_name>': int(<refresh_token validity duration in days>)}
:param tuple rate_limit: of form (<requests>, <seconds>). Default is five per second
"""
init_app(set_backends=True, routes=False)
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
for addon in addons:
days = math.ceil(int(addons[addon])*0.75)
delta = relativedelta(days=days)
Provider = look_up_provider(addon)
if not Provider:
logger.error('Unable to find Provider class for addon {}'.format(addon))
else:
main(delta, Provider, rate_limit, dry_run=dry_run)
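# Illustrative invocation (editor's addition, not part of the original script):
# per the run_main docstring, addons maps addon short names to refresh-token
# validity in days and rate_limit is (requests, seconds); the values below are
# made up.
#
# run_main(addons={'box': 60, 'googledrive': 14}, rate_limit=(5, 1), dry_run=True)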
|
barun-saha/ns2web
|
ns2trace/metrics.py
|
Python
|
gpl-2.0
| 8,308
| 0.011916
|
__author__= "barun"
__date__ = "$20 May, 2011 12:19:25 PM$"
## Defines a collection of metrics that can be used to analyze the performance
# of a network.
class Metrics(object):
## Calculate average throughput as: total_bytes_rcvd / duration.
#
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @param duration Time duration (in s) over which thruput is to be computed. Typically it is the simulation period.
# @return Average throughput in Kbps; return -1 if duration is not positive
@staticmethod
def average_throughput(pkts_list, duration):
#print 'Average throughput'
avg_thruput = 0
start = -1
stop = 0
if pkts_list:
for record in pkts_list:
#print record
try:
avg_thruput += long(record[1])
if start == -1:
start = float(record[0])
stop = float(record[0])
#print record[0], record[1]
except IndexError:
pass
if duration <= 0:
duration = stop - start + 0.00000001
#print 'duration:', duration
avg_thruput = 8 * float(avg_thruput) / (1024 * duration) # Since pkt len is in bytes
return avg_thruput
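    # Worked example (editor's addition, not in the original file): three
    # 1500-byte packets received over a 2 s window give
    # 8 * 4500 / (1024 * 2) ~= 17.58 Kbps:
    # Metrics.average_throughput([(0.5, 1500), (1.0, 1500), (1.5, 1500)], 2)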
@staticmethod
## Calculate instantaneous throughput as total bytes_rcvd at each time instant.
#
# <b>Logic</b>: To determine total bytes received at any instant, say, at t = 5, sum
# up sizes of all packets received in the interval 5.00000... to 5.99999...
#
# This procedure is repeated for all the time instances.
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_Kbytes),]
def instantaneous_throughput(pkts_list=None):
#print 'Instantaneous throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
#i_duration = long(duration)
if pkts_list:
for record in pkts_list:
try:
if start_time < 0: # This is the first record encountered
start_time = float(record[0])
#print start_time
this_instance = int(start_time)
#print this_instance
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
bytes_this_instance += long(record[1])
else:
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
this_instance += 1
bytes_this_instance = long(record[1])
except IndexError:
pass
# Append the last record
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
return result
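    # Worked example (editor's addition, not in the original file): packets
    # [(0.2, 1024), (0.5, 1024), (1.3, 2048)] are grouped per whole second, so
    # instantaneous_throughput returns [(0, 16), (1, 16)] (Kbit per instant).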
@staticmethod
def cumulative_bytes_received(pkts_list=None):
#print 'Cumulative plot of bytes received'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
this_instance += 1
#print cur_time
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
return result
@staticmethod
## Calculate throughput as total bytes_rcvd up to the current instance of time / total duration up to the current instance
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_bytes),]
def cumulative_throughput(pkts_list=None):
#print 'Current throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
this_instance += 1
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
return result
## Return the end to end delay for each packet moving between a source and
# destination node, and identified by a flow ID. The delay is computed as
# the difference between sending time of the packet at source node and
# receiving time of the packet at the destination node.
# @param send_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @param rcvd_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @return A list in the form [(seq_num, delay),]
@staticmethod
def end2end_delay(send_pkts_list=None, rcvd_pkts_list=None):
#print 'End to end delay'
send_pkts = {}
rcvd_pkts = {}
for pkt in send_pkts_list:
send_pkts[pkt[0]] = float(pkt[1])
for pkt in rcvd_pkts_list:
rcvd_pkts[pkt[0]] = float(pkt[1])
pkt_delay = []
for seq_num in send_pkts:
if seq_num in rcvd_pkts:
if rcvd_pkts[seq_num] >= send_pkts[seq_num]:
delay = rcvd_pkts[seq_num] - send_pkts[seq_num]
pkt_delay.append( (seq_num, delay) )
# Sort pkt_delay in integer order of seq_num -- otherwise displayed
# graph would be garbage
pkt_delay = [ ( int(e[0]), e[1], ) for e in pkt_delay ]
pkt_delay.sort()
return pkt_delay
# @param send_pkts_list An iterator object in the format [seq_num]
@staticmethod
def packet_retransmissions(send_pkts_list=None):
#print 'Packet retransmissions'
send_pkts = {}
send_pkts_list = [ int(item) for item in send_pkts_list ]
for seq_num in send_pkts_list:
if seq_num in send_pkts:
send_pkts[seq_num] += 1
else:
send_pkts[seq_num] = 0
pkt_retransmits = []
for (seq_num, retransmits) in send_pkts.items():
if retransmits != 0:
|
sysbot/pastedown
|
vendor/pygments/tests/run.py
|
Python
|
mit
| 1,247
| 0.00401
|
# -*- coding: utf-8 -*-
"""
Pygments unit tests
~~~~~~~~~~~~~~~~~~
Usage::
python run.py [testfile ...]
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os
if sys.version_info >= (3,):
# copy test suite over to "build/lib" and convert it
print ('Copying and converting sources to build/lib/test...')
from distutils.util import copydir_run_2to3
testroot = os.path.dirname(__file__)
newroot = os.path.join(testroot, '..', 'build/lib/test')
copydir_run_2to3(testroot, newroot)
# make nose believe that we run from the converted dir
os.chdir(newroot)
else:
# only find tests in this directory
os.chdir(os.path.dirname(__file__))
try:
import nose
except ImportError:
print ('nose is required to run the Pygments test suite')
sys.exit(1)
try:
# make sure the current source is first on sys.path
sys.path.insert(0, '..')
import pygments
except ImportError:
print ('Cannot find Pygments to test: %s' % sys.exc_info()[1])
sys.exit(1)
else:
print ('Pygments %s test suite running (Python %s)...' %
(pygments.__version__, sys.version.split()[0]))
nose.main()
|
synergeticsedx/deployment-wipro
|
openedx/core/djangoapps/cache_toolbox/relation.py
|
Python
|
agpl-3.0
| 3,965
| 0.000252
|
"""
Caching instances via ``related_name``
--------------------------------------
``cache_relation`` adds utility methods to a model to obtain ``related_name``
instances via the cache.
Usage
~~~~~
::
from django.db import models
from django.contrib.auth.models import User
class Foo(models.Model):
user = models.OneToOneField(
User,
primary_key=True,
related_name='foo',
)
name = models.CharField(max_length=20)
cache_relation(User.foo)
::
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss - hits the database
<Foo: >
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache hit - no database access
<Foo: >
>>> user = User.objects.get(pk=2)
>>> user.foo # Regular lookup - hits the database
<Foo: >
>>> user.foo_cache # Special-case: Will not hit cache or database.
<Foo: >
Accessing ``user_instance.foo_cache`` (note the "_cache" suffix) will now
obtain the related ``Foo`` instance via the cache. Accessing the original
``user_instance.foo`` attribute will perform the lookup as normal.
Invalidation
~~~~~~~~~~~~
Upon saving (or deleting) the instance, the cache is cleared. For example::
>>> user = User.objects.get(pk=1)
>>> foo = user.foo_cache # (Assume cache hit from previous session)
>>> foo.name = "New n
|
ame"
>>> foo.save() # Cache is cleared on save
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss.
<Foo: >
Manual invalidation may also be performed using the following methods::
>>> user_instance.foo_cache_clear()
>>> User.foo_cache_clear_pk(user_instance_pk)
Manual invalidation is required if you use ``.update()`` methods which the
``post_save`` and ``post_delete`` hooks cannot intercept.
Support
~~~~~~~
``cache_relation`` currently only works with ``OneToOneField`` fields. Support
for regular ``ForeignKey`` fields is planned.
"""
from django.db.models.signals import post_save, post_delete
from .core import get_instance, delete_instance
def cache_relation(descriptor, timeout=None):
"""
Adds utility methods to a model to obtain related
model instances via a cache.
"""
rel = descriptor.related
related_name = '%s_cache' % rel.field.related_query_name()
@property
def get(self):
"""
Returns the cached value of the related model if found
in the cache. Otherwise gets and caches the related model.
"""
# Always use the cached "real" instance if available
try:
return getattr(self, descriptor.cache_name)
except AttributeError:
pass
# Lookup cached instance
try:
return getattr(self, '_%s_cache' % related_name)
except AttributeError:
pass
instance = get_instance(rel.model, self.pk, timeout)
setattr(self, '_%s_cache' % related_name, instance)
return instance
setattr(rel.parent_model, related_name, get)
# Clearing cache
def clear(self):
"""
Clears the cache of all related models of self.
"""
delete_instance(rel.model, self)
@classmethod
def clear_pk(cls, *instances_or_pk): # pylint: disable=unused-argument
"""
Clears the cache of all related models of
the provided instances_or_pk.
"""
delete_instance(rel.model, *instances_or_pk)
def clear_cache(sender, instance, *args, **kwargs): # pylint: disable=unused-argument
"""
Clears the cache of all related models of the
given instance.
"""
delete_instance(rel.model, instance)
setattr(rel.parent_model, '%s_clear' % related_name, clear)
setattr(rel.parent_model, '%s_clear_pk' % related_name, clear_pk)
post_save.connect(clear_cache, sender=rel.model, weak=False)
post_delete.connect(clear_cache, sender=rel.model, weak=False)
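# Illustrative note (editor's addition, not part of the original module): since
# queryset.update() bypasses the post_save signal wired up above, the cache has
# to be cleared by hand afterwards, using the names from the docstring example:
#
# Foo.objects.filter(pk=user.pk).update(name='renamed')
# User.foo_cache_clear_pk(user.pk)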
|
indradhanush/Instamojo-Clone
|
clone/migrations/0002_auto__add_field_product_date_added.py
|
Python
|
gpl-3.0
| 4,593
| 0.00762
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product.date_added'
db.add_column(u'clone_product', 'date_added',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 8, 3, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Product.date_added'
db.delete_column(u'clone_product', 'date_added')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clone.product': {
'Meta': {'object_name': 'Product'},
'base_price': ('django.db.models.fields.FloatField', [], {}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'username': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['clone']
|
jabbalaci/PrimCom
|
data/python/my_except.py
|
Python
|
gpl-2.0
| 660
| 0.007576
|
# found at http://stackoverflow.com/questions/855759/python-try-else
# The statements in the else block are executed if execution falls off
# the bottom of the try, i.e. if there was no exception.
try:
operation_that_can_throw_ioerror()
except IOError:
handle_the_exception_somehow()
else:
# we don't want to catch the IOError if it's raised
another_operation_that_can_throw_ioerror()
finally:
something_we_always_need_to_do()
# The else lets you make sure:
#
# * another_operation_that_can_throw_ioerror() is only run if there's no exception,
# * it's run before the finally block, and
# * any IOErrors it raises aren't caught here
|
kobejean/tensorflow
|
tensorflow/contrib/distributions/python/ops/vector_student_t.py
|
Python
|
apache-2.0
| 10,470
| 0.001242
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vector Student's t distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
class _VectorStudentT(transformed_distribution.TransformedDistribution):
"""A vector version of Student's t-distribution on `R^k`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k
```
where:
* `loc = mu`; a vector in `R^k`,
* `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,
* `Z` denotes the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function), and,
* `||y||**2` denotes the [squared Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
The VectorStudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that the `scale` matrix has semantics closer to std. deviation than
covariance (but it is not std. deviation).
This distribution is an Affine transformation of iid
[Student's t-distributions](
https://en.wikipedia.org/wiki/Student%27s_t-distribution)
and should not be confused with the [Multivariate Student's t-distribution](
https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
traditional Multivariate Student's t-distribution is type of
[elliptical distribution](
https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
```
Notice that the Multivariate Student's t-distribution uses `k` where the
Vector Student's t-distribution has a `1`. Conversely the Vector version has a
broader application of the power-`k` in the normalization constant.
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
chol = [[1., 0, 0.],
[1, 3, 0],
[1, 2, 3]]
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on an observation in R^3, returning a scalar.
vt.prob([-1., 0, 1])
# Initialize a batch of two 3-variate vector Student's t-distributions.
mu = [[1., 2, 3],
[11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
vt = tfd.VectorStudentT(loc=mu, scale_tril=chol)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1],
[-11, 0, 11]]
vt.prob(x)
```
For more examples of how to construct the `scale` matrix, see the
`tf.contrib.distributions.bijectors.Affine` docstring.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
loc=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="VectorStudentT"):
"""Instantiates the vector Student's t-distributions on `R^k`.
The `batch_shape` is the broadcast between `df.batch_shape` and
`Affine.batch_shape` where `Affine` is constructed from `loc` and
`scale_*` arguments.
The `event_shape` is the event shape of `Affine.event_shape`.
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values. Must be
scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
same `batch_shape` implied by `loc`, `scale_*`.
loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k], which represents a k x k
diagonal matrix. When `None` no diagonal term is added to `scale`.
scale_tril: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k, k], which represents a k x k
lower triangular matrix. When `None` no `scale_tril` term is added to
`scale`. The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
represents an r x r Diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
with ops.name_scope(name) as name:
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df
|
NoMod-Programming/PyRobotC
|
pyRobotC.py
|
Python
|
mit
| 19,127
| 0.023997
|
import ast
import traceback
import os
import sys
userFunctions = {}
renames = ['vex.pragma','vex.motor','vex.slaveMotors','vex.motorReversed']
classNames = []
indent = ' '
sameLineBraces = True
compiled = {}
def module_rename(aNode):
if aNode.func.print_c() == 'vex.pragma':
asC = '#pragma '
useComma = False
pragmaDirective = aNode.args.pop(0)
asC += pragmaDirective.s
if aNode.args:
asC += '('
for arg in aNode.args:
if useComma:
asC += ', '
else:
useComma = True
asC += arg.print_c()
asC += ')'
asC += '\n'
return asC
elif aNode.func.print_c() == 'vex.motor':
asC = 'motor[' + aNode.args[0].print_c()
asC += '] = ' + aNode.args[1].print_c()
return asC
elif aNode.func.print_c() == 'vex.slaveMotors':
masterMotor = aNode.args.pop(0).print_c()
asC = ''
for slave in aNode.args:
asC += 'slaveMotor(' + slave.print_c() + ', ' + masterMotor + ');\n'
return asC[:-2]
elif aNode.func.print_c() == 'vex.motorReversed':
asC = 'bMotorReflected[' + aNode.args[0].print_c()
asC += '] = ' + aNode.args[1].print_c()
return asC
return 'Unknown function. This should not happen'
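# Illustrative mapping (editor's addition, not in the original source), based on
# the branches above: a call node for vex.motor(port2, 127) is rewritten to
# "motor[port2] = 127", and vex.pragma('config', ...) becomes a
# "#pragma config(...)" RobotC directive.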
def escape_string(s, unicode = False, max_length = 200):
ret = []
# Try to split on whitespace, not in the middle of a word.
split_at_space_pos = max_length - 10
if split_at_space_pos < 10:
split_at_space_pos = None
position = 0
if unicode:
position += 1
ret.append('L')
ret.append('"')
position += 1
for c in s:
newline = False
if c == "\n":
to_add = r"\n"
newline = True
elif ord(c) < 32 or 0x80 <= ord(c) <= 0xff:
to_add = r"\x{:02X}".format(ord(c))
elif ord(c) > 0xff:
if not unicode:
raise ValueError("string contains unicode character but unicode=False")
to_add = r"\u{:04X}".format(ord(c))
elif r'\"'.find(c) != -1:
to_add = r"\{}".format(c)
else:
to_add = c
ret.append(to_add)
position += len(to_add)
if newline:
position = 0
if split_at_space_pos is not None and position >= split_at_space_pos and " \t".find(c) != -1:
ret.append("\\\n")
position = 0
elif position >= max_length:
ret.append("\\\n")
position = 0
ret.append('"')
return "".join(ret)
class C_Module(ast.Module):
def prepare(self):
pass
def print_c(self):
asC = ''
for node in self.body:
try:
asC += node.print_c()
except Exception as e:
print(traceback.format_exc())
print("Current code:")
print(asC)
return asC
class C_Bytes(ast.Bytes):
def prepare(self):
pass
def print_c(self):
return escape_string(self.s.decode('utf-8'),True)
class C_Str(ast.Str):
def prepare(self):
pass
def print_c(self):
return escape_string(self.s)
class C_Num(ast.Num):
def prepare(self):
pass
def print_c(self):
return str(self.n)
class C_FunctionDef(ast.FunctionDef):
def prepare(self):
"""Prepare for writing. Take note of return types, class names, etc..."""
if self.returns:
userFunctions[self.name] = self.returns.print_c()
def print_c(self):
asC = '\n'
if ast.get_docstring(self):
asC += '/*\n'
asC += ast.get_docstring(self)
self.body.pop(0)
asC += '\n*/\n'
asC += self.returns.id + ' ' + self.name + '('
isFirst = True
for i, argNode in enumerate(self.args.args):
arg = argNode.arg
try:
argType = argNode.annotation.print_c()
except:
argType = argNode.annotation
if isFirst:
isFirst = False
else:
asC += ', '
asC += argType + ' ' + arg
if i >= self.args.minArgs:
asC += ' = ' + (self.args.defaults[i - self.args.minArgs]).print_c()
if sameLineBraces:
asC += ') {\n'
else:
asC += ')\n{\n'
for childNode in self.body:
try:
unindented = childNode.print_c()
unindented = '\n'.join([indent + x for x in unindented.split('\n')])
if not unindented.endswith('}'):
unindented += ';'
unindented += '\n'
asC += unindented
except Exception as e:
print(traceback.format_exc())
print(ast.dump(childNode))
return asC
asC += '}\n'
return asC
class C_arguments(ast.arguments):
def prepare(self):
self.minArgs = len(self.args) - len(self.defaults)
self.maxArgs = len(self.args)
def print_c(self):
return self
class C_Name(ast.Name):
def prepare(self):
pass
def print_c(self):
if self.id == 'True':
return 'true'
elif self.id == 'False':
return 'false'
elif self.id == 'None':
return '0'
return self.id
if "NameConstant" in ast.__dict__:
class C_NameConstant(ast.NameConstant):
def prepare(self):
pass
def print_c(self):
if self.value == True:
# True
return 'true'
elif self.value == False:
# False
return 'false'
else:
return '0'
class C_Expr(ast.Expr):
def prepare(self):
pass
def print_c(self):
return self.value.print_c()
class C_UnaryOp(ast.UnaryOp):
def prepare(self):
pass
def print_c(self):
return self.op.print_c() + self.operand.print_c()
class C_UAdd(ast.UAdd):
def prepare(self):
pass
def print_c(self):
return '+'
class C_USub(ast.USub):
def prepare(self):
pass
def print_c(self):
return '-'
class C_Not(ast.Not):
def prepare(self):
pass
def print_c(self):
return '!'
class C_Invert(ast.Invert):
def prepare(self):
pass
def print_c(self):
return '~'
class C_BinOp(ast.BinOp):
def prepare(self):
pass
def print_c(self):
return '({left} {op} {right})'.format(
left = self.left.print_c(),
op = self.op.print_c(),
right = self.right.print_c())
class C_Add(ast.Add):
def prepare(self):
pass
def print_c(self):
return '+'
class C_Sub(ast.Sub):
def prepare(self):
pass
def print_c(self):
return '-'
class C_Mult(ast.Mult):
def prepare(self):
pass
def print_c(self):
return '*'
class C_Div(ast.Div):
def prepare(self):
pass
def print_c(self):
return '/'
class C_Mod(ast.Mod):
def prepare(self):
pass
def print_c(self):
return '%'
class C_LShift(ast.LShift):
def prepare(self):
pass
def print_c(self):
return '<<'
class C_RShift(ast.RShift):
def prepare(self):
pass
def print_c(self):
return '>>'
class C_BitOr(ast.BitOr):
def prepare(self):
pass
def print_c(self):
return '|'
class C_BitXor(ast.BitXor):
def prepare(self):
pass
def print_c(self):
return '^'
class C_BitAnd(ast.BitAnd):
def prepare(self):
pass
def print_c(self):
return '&'
class C_BoolOp(ast.BoolOp):
def prepare(self):
pass
def print_c(self):
asC = '(' + self.values.pop(0).print_c()
for value in self.values:
asC += ' ' + self.op.print_c() + ' '
asC += value.print_c()
return asC + ')'
class C_And(ast.And):
def prepare(self):
pass
def print_c(self):
return '&&'
class C_Or(ast.Or):
def prepare(self):
pass
def print_c(self):
return '||'
class C_Compare(ast.Compare):
def prepare(self):
pass
def print_c(self):
asC = ''
self.comparators.insert(0,self.left)
addAnd = False
for i,op in enumerate(self.ops):
if addAnd:
asC += ' && '
else:
addAnd = True
asC += '(' + self.comparators[i].print_c() + ' '
asC += op.print_c()
asC += ' ' + self.comparators[i + 1].print_c() + ')'
return asC
class C_Eq(ast.Eq):
def prepare(self):
pass
def print_c(self):
return '=='
class C_NotEq(ast.NotEq):
def prepare(self):
pass
def print_c(self):
return '!='
class C_Lt(ast.Lt):
def prepare(self):
pass
def print_c(self):
return '<'
class C_LtE(ast.
|
stoewer/nixpy
|
nixio/pycore/h5group.py
|
Python
|
bsd-3-clause
| 10,059
| 0
|
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from __future__ import (absolute_import, division, print_function)
import h5py
import numpy as np
from .h5dataset import H5DataSet
from ..value import DataType
from .block import Block
from .section import Section
from . import util
from .exceptions import InvalidEntity
class H5Group(object):
def __init__(self, parent, name, create=False):
self._parent = parent
self.name = name
self.group = None
if create or name in self._parent:
self._create_h5obj()
def _create_h5obj(self):
if self.name in self._parent:
self.group = self._parent[self.name]
else:
gcpl = h5py.h5p.create(h5py.h5p.GROUP_CREATE)
flags = h5py.h5p.CRT_ORDER_TRACKED | h5py.h5p.CRT_ORDER_INDEXED
gcpl.set_link_creation_order(flags)
name = self.name.encode("utf-8")
gid = h5py.h5g.create(self._parent.id, name, gcpl=gcpl)
self.group = h5py.Group(gid)
def create_link(self, target, name):
self._create_h5obj()
if name in self.group:
del self.group[name]
self.group[name] = target._h5group.group
@classmethod
def create_from_h5obj(cls, h5obj):
parent = h5obj.parent
name = h5obj.name.split("/")[-1]
if isinstance(h5obj, h5py.Group):
return cls(parent, name)
elif isinstance(h5obj, h5py.Dataset):
return H5DataSet(parent, name)
else:
raise ValueError("Invalid object: "
"{} must be either h5py.Group of h5py.Dataset.")
def open_group(self, name, create=False):
"""
Returns a new H5Group with the given name contained in the current
group. If the current group does not exist in the file,
it is automatically created.
:param name: the name of the group
:param create: creates the child group in the file if it does not exist
:return: a new H5Group object
"""
self._create_h5obj()
return H5Group(self.group, name, create)
def create_dataset(self, name, shape, dtype):
"""
Creates a dataset object under the current group with a given name,
shape, and type.
:param name: the name of the dataset
:param shape: tuple representing the shape of the dataset
:param dtype: the type of the data for this dataset (DataType)
:return: a new H5DataSet object
"""
self._create_h5obj()
return H5DataSet(self.group, name, dtype, shape)
def get_dataset(self, name):
"""
Returns a contained H5DataSet object.
:param name: name of the dataset
:return: H5DataSet object
"""
notfound = KeyError("No DataSet named {} found.")
if self.group is None:
raise notfound
if name in self.group:
dset = self.group[name]
return H5DataSet.create_from_h5obj(dset)
else:
raise notfound
def write_data(self, name, data, dtype=None):
"""
Writes the data to a Dataset contained in the group with the
given name. Creates the Dataset if necessary.
:param name: name of the Dataset object
:param data: the data to write
:param dtype: optionally specify the data type, otherwise it will be
automatically determined by the data
"""
shape = np.shape(data)
if self.has_data(name):
dset = self.get_dataset(name)
dset.shape = shape
else:
if dtype is None:
dtype = DataType.get_dtype(data[0])
dset = self.create_dataset(name, shape, dtype)
dset.write_data(data)
def get_data(self, name):
"""
Returns the data contained in the dataset identified by 'name', or an
empty list if a dataset of that name does not exist in the Group.
:param name: The name of the dataset
:return: The data contained in the dataset as a numpy array or None
"""
if name not in self.group:
return []
dset = self.group[name]
# TODO: Error if dset is Group?
return dset[:]
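    # Illustrative usage (editor's addition, not part of the original file):
    #
    #     grp = H5Group(h5file, "data", create=True)
    #     grp.write_data("values", [1.0, 2.0, 3.0])
    #     grp.get_data("values")      # -> array([1., 2., 3.])
    #
    # where h5file is assumed to be an open h5py.File acting as the parent group.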
def has_data(self, name):
"""
Return True if the Group contains a Dataset object with the given name.
:param name: name of Dataset
:return: True if Dataset exists in Group, False if it does not exist,
or exists and is not a Dataset
"""
if self.group.get(name, getclass=True) == h5py.Dataset:
return True
else:
return False
def has_by_id(self, id_or_name):
if not self.group:
return False
if util.is_uuid(id_or_name):
for item in self:
if item.get_attr("entity_id") == id_or_name:
return True
else:
return False
else:
return id_or_name in self.group
def get_by_id_or_name(self, id_or_name):
if util.is_uuid(id_or_name):
return self.get_by_id(id_or_name)
else:
return self.get_by_name(id_or_name)
def get_by_name(self, name):
if self.group and name in self.group:
return self.create_from_h5obj(self.group[name])
else:
raise ValueError("No item with name {} found in {}".format(
name, self.group.name
))
def get_by_id(self, id_):
if self.group:
for item in self:
if item.get_attr("entity_id") == id_:
return item
raise ValueError("No item with ID {} found in {}".format(
id_, self.name
))
def get_by_pos(self, pos):
if not self.group:
raise ValueError
# Using low level interface to specify iteration order
name, _ = self.group.id.links.iterate(lambda n: n,
idx_type=h5py.h5.INDEX_CRT_ORDER,
order=h5py.h5.ITER_INC,
idx=pos)
return self.get_by_name(name)
def delete(self, id_or_name):
if util.is_uuid(id_or_name):
name = self.get_by_id_or_name(id_or_name).name
else:
name = id_or_name
try:
del self.group[name]
except Exception:
raise ValueError("Error deleting {} from {}".format(name,
self.name))
# Delete if empty and non-root container
groupdepth = len(self.group.name.split("/")) - 1
if not len(self.group) and groupdepth > 1:
del self.parent.group[self.name]
# del self.group
self.group = None
def set_attr(self, name, value):
self._create_h5obj()
if value is None:
if name in self.group.attrs:
del self.group.attrs[name]
else:
self.group.attrs[name] = value
def get_attr(self, name):
if self.group is None:
return None
attr = self.group.attrs.get(name)
if isinstance(attr, bytes):
attr = attr.decode()
return attr
def find_children(self, filtr=None, limit=None):
result = []
start_depth = len(self.group.name.split("/"))
def match(name, obj):
curdepth = name.split("/")
if limit is not None and curdepth == start_depth + limit:
return None
h5grp = H5Group.create_from_h5obj(obj)
if filtr(h5grp):
result.append(h5grp)
self.group.visititems(match)
return result
@property
def file(self):
"""
An H5Group object which represents the file root.
:return:
|
abahdanovich/distorm
|
disOps/disOps.py
|
Python
|
gpl-3.0
| 22,792
| 0.031985
|
#
# disOps.py v 1.0.0
#
# Copyright (C) 2011 Gil Dabah, http://ragestorm.net/disops/
#
# disOps is a part of the diStorm project, but can be used for anything.
# The generated output is tightly coupled with diStorm data structures which can be found at instructions.h.
# The code in diStorm that actually walks these structures is found at instructions.c.
#
# Since the DB was built purposely for diStorm, there are some
# Known issues:
# 1. ARPL/MOVSXD information in DB is stored as ARPL.
# Since ARPL and MOVSXD share the same opcode this DB doesn't support this mix.
# Therefore, if you use this DB for x64 instructions, you have to take care of this one.
#
# 2. SSE CMP pseudo instructions have the DEFAULT suffix letters of its type in the second mnemonic,
# the third operand, Imm8 which is respoinsible for determining the suffix,
# doesn't appear in the operands list but rather an InstFlag.PSEUDO_OPCODE implies this behavior.
#
# 3. The WAIT instruction is a bit problematic from a static DB point of view, read the comments in init_FPU in x86sets.py.
#
# 4. The OpLen.OL_33, [0x66, 0x0f, 0x78, 0x0], ["EXTRQ"] is very problematic as well.
# Since there's another 8 group table after the 0x78 byte in this case, but it's already a Prefixed table.
# Therefore, we will handle it as a normal 0x78 instruction with a mandatory prefix of 0x66.
# But the REG (=0) field of the ModRM byte will be checked in the decoder by a flag that states so.
# Otherwise, another normal table after Prefixed table really complicates matters,
# and isn't worth the hassle for one exceptional instruction.
#
# 5. The NOP (0x90) instruction is really set in the DB as xchg rAX, rAX. Rather than true NOP, this is because of x64 behavior.
# Hence, it will be decided in runtime when decoding streams according to the mode.
#
# 6. The PAUSE (0xf3, 0x90) instruction isn't found in the DB, it will be returned directly by diStorm.
# This is because the 0xf3 in this case is not a mandatory prefix, and we don't want it to be built as part of a prefixed table.
#
# 7. The IO String instructions don't have explicit form and they don't support segments.
#    It's up to diStorm to decide what to do with the operands and which segment is default and overridden.
#
# To maximize the usage of this DB, one should learn the documentation of diStorm regarding the InstFlag and Operands Types.
#
import time
import x86sets
import x86db
from x86header import *
FLAGS_BASE_INDEX = 5 # Used to reserve the first few flags in the table for manua
|
l defined instructions in x86defs.c
mnemonicsIds = {} # mnemonic : offset to mnemonics table of strings.
idsCounter = len("undefined") + 2 # Starts immediately after this one.
# Support SSE pseudo compare instructions. We wil
|
l have to add them manually.
def FixPseudo(mnems):
return [mnems[0] + i + mnems[1] for i in ["EQ", "LT", "LE", "UNORD", "NEQ", "NLT", "NLE", "ORD"]]
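# For illustration, FixPseudo(["CMP", "PS"]) expands to ["CMPEQPS", "CMPLTPS",
# "CMPLEPS", "CMPUNORDPS", "CMPNEQPS", "CMPNLTPS", "CMPNLEPS", "CMPORDPS"].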
# Support AVX pseudo compare instructions. We will have to add them manually.
def FixPseudo2(mnems):
return [mnems[0] + i + mnems[1] for i in ["EQ", "LT", "LE", "UNORD", "NEQ", "NLT", "NLE", "ORD",
"EQ_UQ", "NGE", "NGT", "FLASE", "EQ_OQ", "GE", "GT", "TRUE",
"EQ_OS", "LT_OQ", "LE_OQ", "UNORD_S", "NEQ_US", "NLT_UQ", "NLE_UQ", "ORD_S",
"EQ_US"]]
def TranslateMnemonics(pseudoClassType, mnems):
global mnemonicsIds
global idsCounter
l = []
if pseudoClassType == ISetClass.SSE or pseudoClassType == ISetClass.SSE2:
mnems = FixPseudo(mnems)
elif pseudoClassType == ISetClass.AVX:
mnems = FixPseudo(mnems)
for i in mnems:
if len(i) == 0:
continue
if mnemonicsIds.has_key(i):
l.append(str(mnemonicsIds[i]))
else:
mnemonicsIds[i] = idsCounter
l.append(str(idsCounter))
idsCounter += len(i) + 2 # For len/null chars.
if idsCounter > 2**16:
raise "opcodeId is too big to fit into uint16_t"
return l
# All VIAL and diStorm3 code are based on the order of this list, do NOT edit!
REGISTERS = [
"RAX", "RCX", "RDX", "RBX", "RSP", "RBP", "RSI", "RDI", "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", "XX",
"EAX", "ECX", "EDX", "EBX", "ESP", "EBP", "ESI", "EDI", "R8D", "R9D", "R10D", "R11D", "R12D", "R13D", "R14D", "R15D", "XX",
"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI", "R8W", "R9W", "R10W", "R11W", "R12W", "R13W", "R14W", "R15W", "XX",
"AL", "CL", "DL", "BL", "AH", "CH", "DH", "BH", "R8B", "R9B", "R10B", "R11B", "R12B", "R13B", "R14B", "R15B", "XX",
"SPL", "BPL", "SIL", "DIL", "XX",
"ES", "CS", "SS", "DS", "FS", "GS", "XX",
"RIP", "XX",
"ST0", "ST1", "ST2", "ST3", "ST4", "ST5", "ST6", "ST7", "XX",
"MM0", "MM1", "MM2", "MM3", "MM4", "MM5", "MM6", "MM7", "XX",
"XMM0", "XMM1", "XMM2", "XMM3", "XMM4", "XMM5", "XMM6", "XMM7", "XMM8", "XMM9", "XMM10", "XMM11", "XMM12", "XMM13", "XMM14", "XMM15", "XX",
"YMM0", "YMM1", "YMM2", "YMM3", "YMM4", "YMM5", "YMM6", "YMM7", "YMM8", "YMM9", "YMM10", "YMM11", "YMM12", "YMM13", "YMM14", "YMM15", "XX",
"CR0", "", "CR2", "CR3", "CR4", "", "", "", "CR8", "XX",
"DR0", "DR1", "DR2", "DR3", "", "", "DR6", "DR7"]
def DumpMnemonics():
global mnemonicsIds
# Add the hardcoded instruction which are not found in the DB.
# Warning: This should be updated synchronously with the code in diStorm.
map(lambda x: TranslateMnemonics(None, [x]), ["WAIT", "MOVSXD", "PAUSE"])
f = open("defs.txt", "w")
f.write("typedef enum {\n\tI_UNDEFINED = 0, ")
pos = 0
l2 = sorted(mnemonicsIds.keys())
for i in l2:
s = "I_%s = %d" % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i])
if i != l2[-1]:
s += ","
pos += len(s)
if pos >= 70:
s += "\n\t"
pos = 0
elif i != l2[-1]:
s += " "
f.write(s)
f.write("\n} _InstructionType;\n\n")
regsText = "const _WRegister _REGISTERS[] = {\n\t"
regsEnum = "typedef enum {\n\t"
old = "*"
unused = 0
for i in REGISTERS:
if old != "*":
if old == "XX":
regsText += "\n\t"
regsEnum += "\n\t"
old = i
continue
else:
regsText += "{%d, \"%s\"}," % (len(old), old)
if len(old):
regsEnum += "R_%s," % old
else:
regsEnum += "R_UNUSED%d," % unused
unused += 1
if i != "XX":
regsText += " "
regsEnum += " "
old = i
regsText += "{%d, \"%s\"}\n};\n" % (len(old), old)
regsEnum += "R_" + old + "\n} _RegisterType;\n"
f.write(regsEnum + "\n")
s = "const unsigned char* _MNEMONICS = \n\"\\x09\" \"UNDEFINED\\0\" "
l = zip(mnemonicsIds.keys(), mnemonicsIds.values())
l.sort(lambda x, y: x[1] - y[1])
for i in l:
s += "\"\\x%02x\" \"%s\\0\" " % (len(i[0]), i[0])
if len(s) - s.rfind("\n") >= 76:
s += "\\\n"
s = s[:-1] + ";\n\n" # Ignore last space.
f.write(s)
f.write(regsText + "\n")
f.close()
# Used for Python dictionary of opcodeIds-->mnemonics.
s = "\n"
for i in mnemonicsIds:
#s += "0x%x: \"%s\", " % (mnemonicsIds[i], i) # python
s += "%s (0x%x), " % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i]) # java
if len(s) - s.rfind("\n") >= 76:
s = s[:-1] + "\n"
#print s
O_NONE = 0
# REG standalone
O_REG = 1
# IMM standalone
O_IMM = 2
# IMM_1 standalone
O_IMM_1 = 4
# IMM_2 standalone
O_IMM_2 = 5
# DISP standalone
O_DISP = 3
# MEM uses DISP
O_MEM = 3
# PC uses IMM
O_PC = 2
# PTR uses IMM
O_PTR = 2
_OPT2T = {OperandType.NONE : O_NONE,
OperandType.IMM8 : O_IMM,
OperandType.IMM16 : O_IMM,
OperandType.IMM_FULL : O_IMM,
OperandType.IMM32 : O_IMM,
OperandType.SEIMM8 : O_IMM,
OperandType.IMM16_1 : O_IMM_1,
OperandType.IMM8_1 : O_IMM_1,
OperandType.IMM8_2 : O_IMM_2,
OperandType.REG8 : O_REG,
OperandType.REG16 : O_REG,
OperandType.REG_FULL : O_REG,
OperandType.REG32 : O_REG,
OperandType.REG32_64 : O_REG,
OperandType.FREG32_64_RM : O_REG,
OperandType.RM8 : O_MEM,
OperandType.RM16 : O_MEM,
OperandType.RM_FULL : O_MEM,
OperandType.RM32_64 : O_MEM,
OperandType.RM16_32 : O_MEM,
OperandType.FPUM16 : O_MEM,
OperandType.FPUM32 : O_MEM,
OperandType.FPUM64 : O_MEM,
OperandType.FPUM80 : O_MEM,
OperandType.R32_M
|
secgroup/MTFGatheRing
|
code/web.py
|
Python
|
mit
| 7,006
| 0.007708
|
#!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
log = None
# classes
class Agent():
def __init__(self, ip, cw=True, node=None, state='initial'):
self.ip = ip
self.cw = cw
self.state = state
self.node = node
def __repr__(self):
return 'Agent: ip {}, direction CW: {}, state: {}, node: {}'.format(self.ip, self.cw, self.state, self.node)
class Node():
def __init__(self, label):
assert isinstance(label, int), 'Node constructor accepts numeric label only'
self.label = label
# list of agent ips in the current node
self.agents = []
def add_agent(self, agent_ip):
# add an agent ip to the list of agents in the current node
self.agents.append(agent_ip)
def __repr__(self):
return '<Node {}: [{}]>'.format(self.label, ' | '.join(str(app.agents[ip]) for ip in self.agents))
class Ring():
def __init__(self, n_nodes):
self._nodes = [Node(i) for i in range(n_nodes)]
self.n_nodes = n_nodes
def get_node(self, label):
return self._nodes[label]
def next(self, agent):
"""Return next node."""
i = 1 if agent.cw else -1
return self._nodes[(agent.node+i) % self.n_nodes]
def prev(self, agent):
"""Return prev node."""
i = -1 if agent.cw else 1
return self._nodes[(agent.node+i) % self.n_nodes]
def blocked(self, agent):
"""Check if the next node is blocked."""
next_node = self.next(agent)
if agent.ip == app.malicious_ip:
return len(next_node.agents) > 0
else:
return app.malicious_ip in next_node.agents
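        # In other words: the malicious agent is blocked whenever its next node
        # is occupied at all, while an honest agent is blocked only when the
        # malicious agent sits in its next node.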
def random_place_agents(self):
"""Randomly place agents in the ring."""
#a = app.agents[app.agents_ips[0]]
#a.node = 1
#self.get_node(1).add_agent(a.ip)
#a.cw = False
#a = app.agents[app.agents_ips[1]]
#a.node = 2
#self.get_node(2).add_agent(a.ip)
#a.cw = False
#a = app.agents[app.agents_ips[2]]
#a.node = 4
#self.get_node(4).add_agent(a.ip)
#a.cw = True
#a = app.agents[app.malicious_ip]
#a.node = 6
#self.get_node(6).add_agent(a.ip)
#a.cw = True
# True = clockwise
# False = counterclockwise
a = app.agents[app.agents_ips[0]]
a.node = 3
self.get_node(3).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[1]]
a.node = 6
self.get_node(6).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[2]]
a.node = 5
self.get_node(5).add_agent(a.ip)
a.cw = True
a = app.agents[app.malicious_ip]
a.node = 1
self.get_node(1).add_agent(a.ip)
a.cw = False
return
# at most 1 agent per node, randomize direction in case of unoriented ring
for agent, node in zip(app.agents.values(), random.sample(self._nodes, len(app.agents.keys()))):
agent.cw = True if config.oriented else random.choice([True, False])
agent.node = node.label
self.get_node(node.label).add_agent(agent.ip)
def dump(self):
ring = dict()
for node in self._nodes:
ring[str(node.label)] = [(app.agents[a].ip, str(app.agents[a].cw), app.agents[a].state, app.agents[a].node) for a in node.agents]
return ring
def __repr__(self):
return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
'''Wrapper around the Flask class used to store additional information.'''
def __init__(self, *args, **kwargs):
super(MTFGRServer, self).__init__(*args, **kwargs)
self.ring = Ring(config.n_nodes)
self.agents_ips = config.agents_ips
self.agents = dict()
self.malicious_ip = config.malicious_ip
self.oriented = config.oriented
self.started = False
# instance of the web application
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
"""Reset the global variables by parsing again the config file."""
import config
global log
app.ring = Ring(config.n_nodes)
app.agents = {ip: Agent(ip) for ip in config.agents_ips}
app.malicious_ip = config.malicious_ip
app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
app.oriented = config.oriented
app.started = False
app.ring.random_place_agents()
log = open('/tmp/ev3.log', 'a')
log.write('\n\nIIIIIIIIIINNNNNNNNNIIIIIIIIIIITTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\n')
# views
def _communicate_start():
"""Instruct each bot to start."""
port = 31337
for ip in app.agents_ips[::-1] + [app.malicious_ip]:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# s.sendall(b'Go!\n')
s.close()
@app.route('/start')
def start():
app.started = True
try:
_communicate_start()
except Exception:
pass
return redirect(url_for('index'))
@app.route('/reset')
def reset():
_reset()
return redirect(url_for('index'))
@app.route('/status')
def global_status():
"""Get the whole ring status."""
return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
"""Get the list of agents in the current node."""
agent = app.agents[agent_ip]
    # add blocked
return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
global log
turned = request.args.get('turned') == '1'
state = request.args.get('state')
stopped = request.args.get('stopped') == '1'
# logging
sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
log.write(ss
|
s)
log.write('[Status pre]\n')
log.write(str(app.ring.dump()))
agent = app.agents[agent_ip]
agent.state = state
agent.cw = agent.cw if not turned else not agent.cw
blocked = app.ring.blocked(agent)
if not blocked and not stopped:
# advance to the next node if not blocked
node = app.ring.get_node(agent.node)
next_node = app.ring.next(agent)
agent.node = next_node.label
node.agents.remove(agent_ip)
next_node.add_agent
|
(agent_ip)
log.write('\n[Status post]\n')
log.write(str(app.ring.dump()))
return jsonify(blocked=blocked)
@app.route('/')
def index():
return render_template('base.html', started=app.started)
def main():
app.run(host='0.0.0.0', debug=config.debug)
if __name__ == '__main__':
main()
|
makinacorpus/ionyweb
|
ionyweb/plugin_app/plugin_video/admin.py
|
Python
|
bsd-3-clause
| 157
| 0
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from ionyw
|
eb.plugin_app.plugin_video.models impo
|
rt Plugin_Video
admin.site.register(Plugin_Video)
|
opticode/eve
|
eve/auth.py
|
Python
|
bsd-3-clause
| 9,706
| 0.000103
|
from flask import request, Response, current_app as app, g, abort
from functools import wraps
def requires_auth(endpoint_class):
""" Enables Authorization logic for decorated functions.
:param endpoint_class: the 'class' to which the decorated endpoint belongs
to. Can be 'resource' (resource endpoint), 'item'
(item endpoint) and 'home' for the API entry point.
.. versionchanged:: 0.0.7
        Passing the 'resource' argument when invoking auth.authenticate()
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing (CORS): 'OPTIONS' request
method is now public by default. The actual method ('GET', etc.) will
still be protected if so configured.
.. versionadded:: 0.0.4
"""
def fdec(f):
@wraps(f)
def decorated(*args, **kwargs):
if args:
# resource or item endpoint
resource_name = args[0]
resource = app.config['DOMAIN'][args[0]]
if endpoint_class == 'resource':
public = resource['public_methods']
roles = resource['allowed_roles']
if request.method in ['GET', 'HEAD', 'OPTIONS']:
roles += resource['allowed_read_roles']
else:
roles += resource['allowed_write_roles']
elif endpoint_class == 'item':
public = resource['public_item_methods']
roles =
|
resource['allowed_item_roles']
if request.method in ['GET', 'HEAD', 'OPTIONS']:
roles += resource['allowed_item_read_roles']
else:
roles += resource['allowed_item_write_roles']
if callable(resource['authentication']):
auth = resource['authentica
|
tion']()
else:
auth = resource['authentication']
else:
# home
resource_name = resource = None
public = app.config['PUBLIC_METHODS'] + ['OPTIONS']
roles = app.config['ALLOWED_ROLES']
if request.method in ['GET', 'OPTIONS']:
roles += app.config['ALLOWED_READ_ROLES']
else:
roles += app.config['ALLOWED_WRITE_ROLES']
auth = app.auth
if auth and request.method not in public:
if not auth.authorized(roles, resource_name, request.method):
return auth.authenticate()
return f(*args, **kwargs)
return decorated
return fdec
class BasicAuth(object):
""" Implements Basic AUTH logic. Should be subclassed to implement custom
authentication checking.
.. versionchanged:: 0.4
ensure all errors returns a parseable body #366.
auth.request_auth_value replaced with getter and setter methods which
        rely on flask's 'g' object, for enhanced thread-safety.
.. versionchanged:: 0.1.1
auth.request_auth_value is now used to store the auth_field value.
.. versionchanged:: 0.0.9
Support for user_id property.
.. versionchanged:: 0.0.7
Support for 'resource' argument.
.. versionadded:: 0.0.4
"""
def set_request_auth_value(self, value):
g.auth_value = value
def get_request_auth_value(self):
return g.get("auth_value")
def check_auth(self, username, password, allowed_roles, resource, method):
""" This function is called to check if a username / password
combination is valid. Must be overridden with custom logic.
:param username: username provided with current request.
:param password: password provided with current request
:param allowed_roles: allowed user roles.
:param resource: resource being requested.
:param method: HTTP method being executed (POST, GET, etc.)
"""
raise NotImplementedError
def authenticate(self):
""" Returns a standard a 401 response that enables basic auth.
Override if you want to change the response and/or the realm.
"""
resp = Response(None, 401, {'WWW-Authenticate': 'Basic realm:"%s"' %
__package__})
abort(401, description='Please provide proper credentials',
response=resp)
def authorized(self, allowed_roles, resource, method):
""" Validates the the current request is allowed to pass through.
:param allowed_roles: allowed roles for the current request, can be a
string or a list of roles.
:param resource: resource being requested.
"""
auth = request.authorization
return auth and self.check_auth(auth.username, auth.password,
allowed_roles, resource, method)
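# Illustrative, hypothetical subclass (not part of eve): BasicAuth is meant to be
# subclassed with a custom check_auth(), e.g. against an in-memory account table.
class _ExampleStaticBasicAuth(BasicAuth):
    _ACCOUNTS = {'admin': 'secret'}  # assumed credentials, illustration only
    def check_auth(self, username, password, allowed_roles, resource, method):
        # ignores allowed_roles/resource/method; a real subclass would honour them
        return self._ACCOUNTS.get(username) == password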
class HMACAuth(BasicAuth):
""" Hash Message Authentication Code (HMAC) authentication logic. Must be
subclassed to implement custom authorization checking.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body #366.
.. versionchanged:: 0.0.9
Replaced the now deprecated request.data with request.get_data().
.. versionchanged:: 0.0.7
Support for 'resource' argument.
.. versionadded:: 0.0.5
"""
def check_auth(self, userid, hmac_hash, headers, data, allowed_roles,
resource, method):
""" This function is called to check if a token is valid. Must be
overridden with custom logic.
:param userid: user id included with the request.
:param hmac_hash: hash included with the request.
:param headers: request headers. Suitable for hash computing.
:param data: request data. Suitable for hash computing.
:param allowed_roles: allowed user roles.
:param resource: resource being requested.
:param method: HTTP method being executed (POST, GET, etc.)
"""
raise NotImplementedError
def authenticate(self):
""" Returns a standard a 401. Override if you want to change the
response.
"""
abort(401, description='Please provide proper credentials')
def authorized(self, allowed_roles, resource, method):
""" Validates the the current request is allowed to pass through.
:param allowed_roles: allowed roles for the current request, can be a
string or a list of roles.
:param resource: resource being requested.
"""
auth = request.headers.get('Authorization')
try:
userid, hmac_hash = auth.split(':')
except:
auth = None
return auth and self.check_auth(userid, hmac_hash, request.headers,
request.get_data(), allowed_roles,
resource, method)
class TokenAuth(BasicAuth):
""" Implements Token AUTH logic. Should be subclassed to implement custom
authentication checking.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body #366.
.. versionchanged:: 0.0.7
Support for 'resource' argument.
.. versionadded:: 0.0.5
"""
def check_auth(self, token, allowed_roles, resource, method):
""" This function is called to check if a token is valid. Must be
overridden with custom logic.
:param token: decoded user name.
:param allowed_roles: allowed user roles
:param resource: resource being requested.
:param method: HTTP method being executed (POST, GET, etc.)
"""
raise NotImplementedError
def authenticate(self):
""" Returns a standard a 401 response that enables basic auth.
Override if you want to change the response and/or the realm.
"""
resp = Response(None, 401, {'WWW-Authenticate': 'Basic realm:"%s"' %
__package__})
abort(401, description='Please provide proper credentials',
|
androomerrill/scikit-nano
|
sknano/structures/tests/test_graphene.py
|
Python
|
bsd-2-clause
| 1,925
| 0
|
# -*- coding: utf-8 -*-
#
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import *
import numpy as np
from sknano.structures import Graphene, PrimitiveCellGraphene, \
ConventionalCellGraphene, GraphenePrimitiveCell, GrapheneConventionalCell
def test1():
s = Graphene(armchair_edge
|
_length=5, zigzag_edge_length=5)
assert_equals(s.zigzag_edge_length, 5)
assert_equals(s.armchair_edge_length, 5)
assert_true(isinstance(s, ConventionalCellGraphene))
assert_true(isinstance(s.unit_cell, GrapheneConventionalCell))
print(s.unit_cell)
def test2():
s = PrimitiveCellGraphene(edge_length=5)
assert_equals(s.edge_length, 5)
assert_true(isinstance(s
|
, PrimitiveCellGraphene))
assert_true(isinstance(s.unit_cell, GraphenePrimitiveCell))
print(np.degrees(s.r1.angle(s.r2)))
print(s.unit_cell)
print(s.area)
print(s)
def test3():
s = ConventionalCellGraphene(armchair_edge_length=5, zigzag_edge_length=5)
assert_equals(s.zigzag_edge_length, 5)
assert_equals(s.armchair_edge_length, 5)
assert_true(isinstance(s, ConventionalCellGraphene))
assert_true(isinstance(s.unit_cell, GrapheneConventionalCell))
print(s.unit_cell)
print(s.area)
print(s)
def test4():
s = Graphene.from_conventional_cell(armchair_edge_length=5,
zigzag_edge_length=5)
assert_equals(s.zigzag_edge_length, 5)
assert_equals(s.armchair_edge_length, 5)
assert_true(isinstance(s.unit_cell, GrapheneConventionalCell))
print(s.unit_cell)
assert_true(isinstance(s, ConventionalCellGraphene))
def test5():
s = Graphene.from_primitive_cell(edge_length=5)
assert_true(isinstance(s, PrimitiveCellGraphene))
assert_true(isinstance(s.unit_cell, GraphenePrimitiveCell))
if __name__ == '__main__':
nose.runmodule()
|
jumpifzero/morango
|
modelparser.py
|
Python
|
mit
| 5,062
| 0.01857
|
# ============================================================
# modelparser.py
#
# (C) Tiago Almeida 2016
#
# Still in early development stages.
#
# This module uses PLY (http://www.dabeaz.com/ply/ply.html)
# and a set of grammar rules to parse a custom model
# definition language.
# ============================================================
import functools as ftools
import pprint
import ply.lex as lex
import ply.yacc as yacc
import sys
import exceptions
# ============================================================
# Constants
# ============================================================
MULT_SINGLE = 1
MULT_ANY = 2
# ============================================================
# Lexer rules
# ============================================================
reserved = {
'String' : 'STRING',
'Date' : 'DATE',
}
# List of token names. This is always required
tokens = (
'MODELNAME',
'NUMBER',
'COMMA',
'STAR',
'LPAREN',
'RPAREN',
'COLON',
'LBRACKET',
'RBRACKET',
'SEMICOLON',
'EXCLAMATION',
'ID'
) + tuple(reserved.values())
# Regular expression rules for simple tokens
t_COMMA = r'\,'
t_STAR = r'\*'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COLON = r'\:'
t_LBRACKET = r'\{'
t_RBRACKET = r'\}'
t_SEMICOLON = r'\;'
t_EXCLAMATION = r'\!'
# A regular expression rule with some action code
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Identifier match
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = reserved.get(t.value,'ID') # Check for reserved words
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.line
|
no += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# ====================================================
|
========
# Parser rules
# ============================================================
# ----------------
# BNF Grammar
# ----------------
# model : MODELNAME { fields }
# fields : fields field ;
# | field ;
models = []
fields = []
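# An illustrative (hypothetical) definition accepted by the grammar above,
# where a field is written as  name ':' ['*'] type ['!'] ';' :
#   Person {
#       name: String;
#       birthday: Date!;
#       friends: *Person;
#   }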
def p_file(p):
"""
rules : models
"""
p[0] = p[1]
def p_modelsdecl(p):
"""
models : models model
| model
"""
if len(p) >= 3:
models.append(p[2])
else:
models.append(p[1])
def p_modeldecl(p):
'model : ID LBRACKET fields RBRACKET'
global fields
p[0] = { 'model': p[1],
'fields': fields
}
fields = []
def p_fields_decl(p):
"""
fields : fields field
| field
"""
if len(p) >= 3:
fields.append(p[2])
else:
fields.append(p[1])
def p_field_decl(p):
"""
field : ID COLON multiplicity datatype notnull SEMICOLON
"""
# return an object with the field data
#
p[0] = {
'name': p[1],
'type': p[4],
'mult': p[3],
'null': p[5]
}
def p_datatype(p):
"""
datatype : STRING
| DATE
| ID
"""
p[0] = p[1]
def p_field_multiplicity(p):
"""
multiplicity : STAR
| empty
"""
if p[1] == '*':
p[0] = MULT_ANY
else:
p[0] = MULT_SINGLE
def p_field_notnull(p):
"""
notnull : EXCLAMATION
| empty
"""
if p[1] == '!':
p[0] = False
else:
p[0] = True
def p_empty(p):
'empty :'
pass
def p_modeldecl_print_error(p):
'model : ID LBRACKET error RBRACKET'
print("Syntax error in model declaration. Bad body")
# Error rule for syntax errors
def p_error(p):
pass
def validate_models_unique(models):
"""
Given a list of models, validates there are no repetitions.
"""
index = {}
for m in models:
print(m['model'])
if m['model'] in index:
raise exceptions.ModelNotUnique(m['model'])
else:
index[m['model']] = True
def validate_fields_unique(models):
"""
Given a list of models, for each one validates there are no
repeated fields.
"""
pass
def parse(file_path, debug_lexer=False):
"""
"""
global models
models = []
# Build the lexer
lexer = lex.lex()
# Read argv(1) file
with open(file_path) as f:
data = f.read()
if debug_lexer:
lexer.input(data)
while True:
tok = lexer.token()
if not tok:
break # No more input
print(tok)
parser = yacc.yacc()
result = parser.parse(data)
return models
def parse_files(files_lst):
# parse the files and join the sublists of models into one
models_fragments = list(map(parse, files_lst))
models = list(ftools.reduce(lambda l1,l2: l1 + l2,
models_fragments))
validate_models_unique(models)
return models
def main():
parse(sys.argv[1])
if __name__ == '__main__':
main()
|
rishabhsixfeet/Dock-
|
MessagesApp/models.py
|
Python
|
mit
| 1,894
| 0.015312
|
from django.db import models
from django.contrib.auth.models import User
from helper_functions import my_strftime
# Create your models here.
#This only contains metadata about this thread (i.e. just the subject for now)
#It is used in a Many-to-Many relationship with User, with a through object that contains the has_been_read flag
class Thread(models.Model):
subject = models.CharField(max_length=64)
def getThread(self):
"""Returns list of most recent messages with corresponding info"""
return [message.getDetail() for message in self.message_set.order_by('time_sent')]
def getThreadInfo(self, user=None):
"""
Returns dictionary object containing basic info about thread,
such a
|
s most recent message/author, title, etc.
"""
if user == None:
has_been_read = False
else:
has_been_
|
read = ThreadMembership.objects.get(user=user, thread=self).has_been_read
last_message = self.message_set.order_by('-time_sent')[0]
return { 'subject' : self.subject, 'last_message' : last_message.getDetail(), 'id' : self.id,
'has_been_read' : has_been_read }
class Message(models.Model):
thread = models.ForeignKey(Thread)
user = models.ForeignKey('userInfo.UserProfile') #the author of this message
time_sent = models.DateTimeField(auto_now_add=True)
text = models.TextField()
def getDetail(self):
"""Returns dictionary object containing the info of this object"""
return { 'author' : self.user.getInfo(),
'timestamp' : my_strftime(self.time_sent),
'text' : self.text }
class ThreadMembership(models.Model):
user = models.ForeignKey('userInfo.UserProfile')
thread = models.ForeignKey(Thread)
#Meta data for user's relation to thread
has_been_read = models.BooleanField(default=False)
|
aksareen/balrog
|
auslib/test/admin/views/test_permissions.py
|
Python
|
mpl-2.0
| 45,228
| 0.004533
|
import mock
import simplejson as json
from auslib.global_state import dbo
from auslib.test.admin.views.base import ViewTest
class TestUsersAPI_JSON(ViewTest):
def testUsers(self):
ret = self._get('/users')
self.assertEqual(ret.status_code, 200)
data = json.loads(ret.data)
data['users'] = set(data['users'])
self.assertE
|
qual(data, dict(users=set(['bill', 'billy', 'bob', 'ashanti', 'mary', 'julie'])))
class TestCurrentUserAPI_JSON(ViewTest):
def testGetCurrentUser(self):
ret = self._get("/users/current", use
|
rname="bill")
self.assertEqual(ret.status_code, 200)
data = json.loads(ret.data)
expected = {
"username": "bill",
"permissions": {
"admin": {
"options": None, "data_version": 1,
},
},
"roles": {
"releng": {
"data_version": 1,
},
"qa": {
"data_version": 1,
},
},
}
self.assertEqual(data, expected)
def testGetCurrentUserWithoutRoles(self):
ret = self._get("/users/current", username="billy")
self.assertEqual(ret.status_code, 200)
data = json.loads(ret.data)
expected = {
"username": "billy",
"permissions": {
"admin": {
"options": {
"products": ["a"]
},
"data_version": 1,
}
},
"roles": {},
}
self.assertEqual(data, expected)
def testGetNamedUser(self):
ret = self._get("/users/mary", username="bill")
self.assertEqual(ret.status_code, 200)
data = json.loads(ret.data)
expected = {
"username": "mary",
"permissions": {
"scheduled_change": {
"options": {
"actions": ["enact"]
},
"data_version": 1,
}
},
"roles": {
"relman": {
"data_version": 1,
},
},
}
self.assertEqual(data, expected)
def testGetNamedUserWithSpecificPermission(self):
ret = self._get("/users/mary", username="bob")
self.assertEqual(ret.status_code, 200)
data = json.loads(ret.data)
expected = {
"username": "mary",
"permissions": {
"scheduled_change": {
"options": {
"actions": ["enact"]
},
"data_version": 1,
}
},
"roles": {
"relman": {
"data_version": 1,
},
},
}
self.assertEqual(data, expected)
def testGetNamedUserWithoutPermission(self):
ret = self._get("/users/bill", username="mary")
self.assertEqual(ret.status_code, 403)
def testGetNonExistentUser(self):
ret = self._get("/users/huetonhu", username="bill")
self.assertEqual(ret.status_code, 404)
class TestPermissionsAPI_JSON(ViewTest):
def testPermissionsCollection(self):
ret = self._get('/users/bill/permissions')
self.assertEqual(ret.status_code, 200)
self.assertEqual(json.loads(ret.data), dict(admin=dict(options=None, data_version=1)))
def testPermissionGet(self):
ret = self._get('/users/bill/permissions/admin')
self.assertEqual(ret.status_code, 200)
self.assertEqual(json.loads(ret.data), dict(options=None, data_version=1))
def testPermissionGetMissing(self):
ret = self.client.get("/users/bill/permissions/rule")
self.assertEqual(ret.status_code, 404)
def testPermissionPut(self):
ret = self._put('/users/bob/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
query = dbo.permissions.t.select()
query = query.where(dbo.permissions.username == 'bob')
query = query.where(dbo.permissions.permission == 'admin')
self.assertEqual(query.execute().fetchone(), ('admin', 'bob', {"products": ["a"]}, 1))
def testPermissionPutWithEmail(self):
ret = self._put('/users/bob@bobsworld.com/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
query = dbo.permissions.t.select()
query = query.where(dbo.permissions.username == 'bob@bobsworld.com')
query = query.where(dbo.permissions.permission == 'admin')
self.assertEqual(query.execute().fetchone(), ('admin', 'bob@bobsworld.com', {"products": ["a"]}, 1))
# This test is meant to verify that the app properly unquotes URL parts
# as part of routing, because it is required when running under uwsgi.
# Unfortunately, Werkzeug's test Client will unquote URL parts before
# the app sees them, so this test doesn't actually verify that case...
def testPermissionPutWithQuotedEmail(self):
ret = self._put('/users/bob%40bobsworld.com/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
query = dbo.permissions.t.select()
query = query.where(dbo.permissions.username == 'bob@bobsworld.com')
query = query.where(dbo.permissions.permission == 'admin')
self.assertEqual(query.execute().fetchone(), ('admin', 'bob@bobsworld.com', {"products": ["a"]}, 1))
def testPermissionsPostWithHttpRemoteUser(self):
ret = self._httpRemoteUserPost('/users/bob/permissions/release_read_only', username="bob", data=dict(options=json.dumps(dict(products=["a", "b"])),
data_version=1))
self.assertEqual(ret.status_code, 200, ret.data)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
r = dbo.permissions.t.select().where(dbo.permissions.username == 'bob').where(dbo.permissions.permission == "release_read_only").execute().fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0], ('release_read_only', 'bob', {"products": ["a", "b"]}, 2))
def testPermissionsPost(self):
ret = self._post('/users/bob/permissions/release_read_only', data=dict(options=json.dumps(dict(products=["a", "b"])), data_version=1))
self.assertEqual(ret.status_code, 200, ret.data)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
r = dbo.permissions.t.select().where(dbo.permissions.username == 'bob').where(dbo.permissions.permission == "release_read_only").execute().fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0], ('release_read_only', 'bob', {"products": ["a", "b"]}, 2))
def testPermissionsPostMissing(self):
ret = self._post("/users/bill/permissions/rule", data=dict(options="", data_version=1))
self.assertStatusCode(ret, 404)
def testPermissionsPostBadInput(self):
ret = self._post("/users/bill/permissions/admin")
self.assertStatusCode(ret, 400)
def testPermissionsPostWithoutPermission(self):
ret = self._post("/users/bob/permissions/rule", username="shane", data=dict(data_version=1, options=json.dumps(dict(actions=["create"]))))
self.assertStatusCode(ret, 403)
def testPermissionPutWithOption(self):
ret = self._put('/users/bob/permissions/release_locale', data=dict(options=json.dumps(dict(products=['a']))))
self.assertStatusCode(ret, 201)
self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
query = dbo.permissions.t.select()
query = query.where(dbo.p
|
UKPLab/deeplearning4nlp-tutorial
|
2017-07_Seminar/Session 4 - LSTM Sequence Classification/code/preprocess.py
|
Python
|
apache-2.0
| 6,664
| 0.012905
|
"""
The file preprocesses the files/train.txt and files/test.txt files.
It requires the dependency-based embeddings by Levy et al. Download them from his website and change
the embeddingsPath variable in the script to point to the unzipped deps.words file.
"""
from __future__ import print_function
import numpy as np
import gzip
import os
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else:
#Python 2.7 imports
import cPickle as pkl
from io import open
#We download German word embeddings from here https://www.ukp.tu-darmstadt.de/research/ukp-in-challenges/germeval-2014/
embeddingsPath = 'embeddings/2014_tudarmstadt_german_50mincount.vocab.gz'
#Train, Dev, and Test files
folder = 'data/'
files = [folder+'NER-de-train.tsv', folder+'NER-de-dev.tsv', folder+'NER-de-test.tsv']
#At which column position is the token and the tag, starting at 0
tokenPosition=1
tagPosition=2
#Size of the context window
window_size = 3
def createMatrices(sentences, windowsize, word2Idx, label2Idx, case2Idx):
unknownIdx = word2Idx['UNKNOWN_TOKEN']
paddingIdx = word2Idx['PADDING_TOKEN']
dataset = []
wordCount = 0
unknownWordCount = 0
for sentence in sentences:
wordIndices = []
caseIndices = []
labelIndices = []
for word, label in sentence:
wordCount += 1
if word in word2Idx:
wordIdx = word2Idx[word]
elif word.lower() in word2Idx:
wordIdx = word2Idx[word.lower()]
else:
wordIdx = unknownIdx
unknownWordCount += 1
#Get the label and map to int
wordIndices.append(wordIdx)
caseIndices.append(getCasing(word, case2Idx))
labelIndices.append(label2Idx[label])
dataset.append([wordIndices, caseIndices, labelIndices])
return dataset
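    # Each dataset entry corresponds to one sentence and holds three parallel
    # index lists: [wordIndices, caseIndices, labelIndices].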
def readFile(filepath, tokenPosition, tagPosition):
sentences = []
sentence = []
for line in open(filepath):
line = line.strip()
if len(line) == 0 or line[0] == '#':
if len(sentence) > 0:
sentences.append(sentence)
sentence = []
continue
splits = line.split('\t')
sentence.append([splits[tokenPosition], splits[tagPosition]])
if len(sentence) > 0:
sentences.append(sentence)
sentence = []
print(filepath, len(sentences), "sentences")
return sentences
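    # Each returned sentence is a list of [token, tag] pairs taken from the
    # given tokenPosition/tagPosition columns of the TSV file.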
def getCasing(word, caseLookup):
casing = 'other'
numDigits = 0
for char in word:
if char.isdigit():
numDigits += 1
digitFraction = numDigits / float(len(word))
if word.isdigit(): #Is a digit
casing = 'numeric'
elif digitFraction > 0.5:
casing = 'mainly_numeric'
elif word.islower(): #All lower case
casing = 'allLower'
elif word.isupper(): #All upper case
casing = 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
casing = 'initialUpper'
elif numDigits > 0:
casing = 'contains_digit'
return caseLookup[casing]
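    # For example: getCasing("Berlin", case2Idx) maps to 'initialUpper',
    # getCasing("2014", case2Idx) to 'numeric', and
    # getCasing("iPhone7", case2Idx) to 'contains_digit'.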
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #
# Start of the preprocessing
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #
outputFilePath = 'pkl/data.pkl.gz'
embeddingsPklPath = 'pkl/embeddings.pkl.gz'
trainSentences = readFile(files[0], tokenPosition, tagPosition)
devSentences = readFile(files[1], tokenPosition, tagPosition)
testSentences = readFile(files[2], tokenPosition, tagPosition)
#Mapping of the labels to integers
labelSet = set()
words = {}
for dataset in [trainSentences, devSentences, testSentences]:
for sentence in dataset:
for token, label in sentence:
labelSet.add(label)
words[token.lower()] = True
# :: Create a mapping for the labels ::
label2Idx = {}
for label in labelSet:
label2Idx[label] = len(label2Idx)
# :: Hard coded case lookup ::
case2Idx = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'mainly_numeric':5, 'contains_digit': 6, 'PADDING_TOKEN':7}
caseEmbeddings = np.identity(len(case2Idx), dtype='float32')
# :: Read in word embeddings ::
word2Idx = {}
wordEmbeddings = []
# :: Downloads the embeddings from the TU-Darmstadt.de webserver ::
if not os.path.isfile(embeddingsPath):
basename = os.path.basename(embeddingsPath)
if basename.startswith('2014_tudarmstadt_german_'):
print("Start downloading word embeddings for German using wget ...")
os.system("wget https://public.ukp.informatik.tu-darmstadt.de/reimers/2014_german_embeddings/"+basename+" -P embeddings/")
else:
print(embeddingsPath, "does not exist. Please provide pre-trained embeddings")
exit()
# :: Load the pre-trained embeddings file ::
fEmbeddings = gzip.open(embeddingsPath, "r") if embeddingsPath.endswith('.gz') else open(embeddingsPath, encoding="utf8")
for line in fEmbeddings:
split = line.decode("utf-8").strip().split(" ")
word = split[0]
if len(word2Idx) == 0: #Add padding+unknown
word2Idx["PADDING_TOKEN"] = len(word2Idx)
        vector = np.zeros(len(split)-1) #Zero vector for 'PADDING' word
wordEmbeddings.append(vector)
word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
vector = np.random.uniform(-0.25, 0.25, len(split)-1)
wordEmbeddings.append(vector)
if split[0].lower() in words:
vector = np.array([float(num) for num in split[1:]])
wordEmbeddings.append(vector)
word2Idx[split[0]] = len(word2Idx)
wordEmbeddings = np.array(wordEmbeddings)
print("Embeddings s
|
hape: ", wordEmbeddings.shape)
print("Len words: ", len(words))
embeddings = {'wordEmbeddings': wordEmbeddings, 'word2Idx': word2Idx,
'caseEmbeddings': caseEmbeddings, 'case2Idx': case2Idx,
'label2Idx': label2Idx}
f = gzip.open(embeddingsPklPath, 'wb')
pkl.dump(embeddings, f, -1)
f.close(
|
)
# :: Create matrices ::
train_set = createMatrices(trainSentences, window_size, word2Idx, label2Idx, case2Idx)
dev_set = createMatrices(devSentences, window_size, word2Idx, label2Idx, case2Idx)
test_set = createMatrices(testSentences, window_size, word2Idx, label2Idx, case2Idx)
f = gzip.open(outputFilePath, 'wb')
pkl.dump(train_set, f)
pkl.dump(dev_set, f)
pkl.dump(test_set, f)
f.close()
print("Data stored in pkl folder")
|
vigetlabs/dnsimple
|
tests/unit/test_client.py
|
Python
|
mit
| 3,371
| 0.019875
|
import pytest
from ..context import dnsimple, fixture_path
from ..request_helper import RequestHelper, request
from dnsimple.client import Client
class TestClient(RequestHelper, object):
def test_constructor_raises_errors_when_improperly_configured(self):
with pytest.raises(dnsimple.credentials.InvalidCredentialsException) as ex:
Client(email = 'user@host.com')
assert 'Invalid credentials supplied' in str(ex.value)
def test_constructor_raises_errors_when_no_credentials_found(self):
with pytest.raises(dnsimple.credentials.InvalidCredentialsException):
Client(credentials_search_paths = fixture_path('credentials'), credentials_filename = 'missing')
def test_constructor_configures_credentials_for_token_authentication(self):
subject = Client(email = 'user@host.com', user_token = 'toke')
assert isinstance(subject.request, dnsimple.connection.Request)
credentials = subject.request.credentials
assert credentials.email == 'user@host.com'
assert credentials.user_token == 'toke'
def test_constructor_configures_credentials_for_password_authentication(self):
subject = Client(email = 'user@host.com', password = 'password')
credentials = subject.request.credentials
assert credentials.email == 'user@host.com'
assert credentials.password == 'password'
def test_constructor_configures_credentials_for_domain_token_authentication(self):
subject = Client(domain_token = 'token')
assert subject.request.credentials.domain_token == 'token'
def test_constructor_configures_credentials_from_configuration_file(self):
subject = Client(credentials_search_paths = [fixture_path('credentials')], credentials_filename = 'basic')
credentials = subject.request.credentials
assert credentials.email == 'user@host.com'
assert credentials.user_token == 'user_token'
assert credentials.password == 'password'
def test_constructor_defaults_sandbox_to_false(self):
subject = Client(email = 'user@host.com', password = 'password')
assert subject.request.sandbox is False
def test_constructor_enables_sandbox(self):
subject = Client(sandbox = True, email = 'user@host.com', password = 'password')
assert subject.request.sandbox is True
def test_transfer_creates_domain_transfer(self, mocker, request):
method = self.stub_request(mocker, request, method_name = 'post', success = True, data = {})
subject = Client(email = 'user@host.com', password = 'password')
contact = dnsimple.models.Contact(request, {'id
|
': 1})
subject.request = request
result = subject.transfer('foo.com', contact)
method.assert_called_once_with('domain_transfers', {'domain': {'name': 'foo.com', 'registrant_id': 1}})
assert result == True
def test_transfer_returns_false_when_transfer_fails(self, mocker, request):
method = self.stub_request(mocker, request, method_name = 'post', success = False)
|
subject = Client(email = 'user@host.com', password = 'password')
contact = dnsimple.models.Contact(request, {'id': 1})
subject.request = request
result = subject.transfer('foo.com', contact)
assert result == False
|
esten/StoriTell
|
StoriTell/stories/api.py
|
Python
|
bsd-3-clause
| 583
| 0.015437
|
from storitell.tastypie.resources import ModelResource
from storitell.stories.models import Story
from storitell.stories.extra_methods import moderate_comment
from storitell.tastypie.validation import Validation
# Stories can be read through a REST-ful inter
|
face. It'd be nice
# to be able to POST as well, but t
|
hat requires validation I haven't
# had time to code yet. Want to add it? Be my guest.
class StoryResource(ModelResource):
class Meta:
queryset = Story.objects.all()
resource_name = 'story'
fields = ['maintext','pub_date','upvotes']
allowed_methods = ['get']
|
tstenner/bleachbit
|
tests/TestWinapp.py
|
Python
|
gpl-3.0
| 19,272
| 0.00109
|
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test cases for module Winapp
"""
import os
import shutil
import sys
import tempfile
import unittest
from tests import common
from bleachbit.Winapp import Winapp, detectos, detect_file, section2option
from bleachbit.Windows import detect_registry_key, parse_windows_build
from bleachbit import logger
def create_sub_key(sub_key):
"""Create a registry key"""
import winreg
hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, sub_key)
hkey.Close()
KEYFULL = 'HKCU\\Software\\BleachBit\\DeleteThisKey'
def get_winapp2():
"""Download and cache winapp2.ini. Return local filename."""
url = "https://rawgit.com/bleachbit/winapp2.ini/master/Winapp2-BleachBit.ini"
tmpdir = None
if os.name == 'posix':
tmpdir = '/tmp'
if os.name == 'nt':
tmpdir = os.getenv('TMP')
fname = os.path.join(tmpdir, 'bleachbit_test_winapp2.ini')
if os.path.exists(fname):
import time
import stat
age_seconds = time.time() - os.stat(fname)[stat.ST_MTIME]
if age_seconds > (24 * 36 * 36):
logger.info('deleting stale file %s ', fname)
os.remove(fname)
if not os.path.exists(fname):
        fobj = open(fname, 'wb')  # urlopen().read() returns bytes, so write in binary mode
import urllib.request
txt = urllib.request.urlopen(url).read()
fobj.write(txt)
return fname
class WinappTestCase(common.BleachbitTestCase):
"""Test cases for Winapp"""
|
def run_all(self, cleaner, really_delete):
"""Test all the cleaner options"""
for (option_id, __name) in cleaner.get_options():
for cmd in cleaner.get_commands(option_id):
for result in cmd.execute(really_delete):
|
common.validate_result(self, result, really_delete)
@common.skipUnlessWindows
def test_remote(self):
"""Test with downloaded file"""
winapps = Winapp(get_winapp2())
for cleaner in winapps.get_cleaners():
self.run_all(cleaner, False)
def test_detectos(self):
"""Test detectos function"""
# Tests are in the format (required_ver, mock, expected_return)
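        # Requirement syntax exercised below: 'A|' means version A or newer,
        # '|B' means version B or older, 'A|B' means the inclusive range A..B,
        # and a bare version matches only that version.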
tests = (('5.1', '5.1', True),
('5.1', '6.0', False),
('6.0', '5.1', False),
# 5.1 is the maximum version
('|5.1', '5.1', True),
('|5.1', '6.0', False),
('|5.1', '10.0', False),
# 10.0 is the maximum version
('|10.0', '5.1', True),
('|10.0', '10.0', True),
('|10.0', '10.1', False),
# 6.1 is the minimum version
('6.1|', '5.1', False),
('6.1|', '6.0', False),
('6.1|', '6.1', True),
('6.1|', '6.2', True),
('6.1|', '10.0', True),
('6.2|', '5.1', False),
('6.2|', '6.0', False),
('6.2|', '6.1', False),
('6.2|', '6.2', True),
# must be 6.2 or 6.3
('6.2|6.3', '6.0', False),
('6.2|6.3', '6.1', False),
('6.2|6.3', '6.2', True),
('6.2|6.3', '6.3', True),
('6.2|6.3', '10.0', False),
# 10.0 is the minimum
('10.0|', '5.1', False),
('10.0|', '10.0', True))
for (req, mock, expected_return) in tests:
mock = parse_windows_build(mock)
actual_return = detectos(req, mock)
self.assertEqual(expected_return, actual_return,
'detectos(%s, %s)==%s instead of %s' % (req, mock,
actual_return, expected_return))
@common.skipUnlessWindows
def test_detect_file(self):
"""Test detect_file function"""
tests = [('%windir%\\system32\\kernel32.dll', True),
('%windir%\\system32', True),
('%ProgramFiles%\\Internet Explorer', True),
('%ProgramFiles%\\Internet Explorer\\', True),
('%windir%\\doesnotexist', False),
('%windir%\\system*', True),
('%windir%\\*ystem32', True),
('%windir%\\*ystem3*', True)]
# On 64-bit Windows, Winapp2.ini expands the %ProgramFiles% environment
# variable to also %ProgramW6432%, so test unique entries in
# %ProgramW6432%.
import struct
if 8 * struct.calcsize('P') != 32:
raise NotImplementedError('expecting 32-bit Python')
if os.getenv('ProgramW6432'):
dir_64 = os.listdir(os.getenv('ProgramFiles'))
dir_32 = os.listdir(os.getenv('ProgramW6432'))
dir_32_unique = set(dir_32) - set(dir_64)
if dir_32 and not dir_32_unique:
raise RuntimeError(
'Test expects objects in %ProgramW6432% not in %ProgramFiles%')
for pathname in dir_32_unique:
tests.append(('%%ProgramFiles%%\\%s' % pathname, True))
else:
logger.info(
'skipping %ProgramW6432% tests because WoW64 not detected')
for (pathname, expected_return) in tests:
actual_return = detect_file(pathname)
msg = 'detect_file(%s) returned %s' % (pathname, actual_return)
self.assertEqual(expected_return, actual_return, msg)
def setup_fake(self, f1_filename=None):
"""Setup the test environment"""
subkey = 'Software\\BleachBit\\DeleteThisKey\\AndThisKey'
# put ampersand in directory name to test
# https://github.com/bleachbit/bleachbit/issues/308
dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp&')
fname1 = os.path.join(dirname, f1_filename or 'deleteme.log')
open(fname1, 'w').close()
dirname2 = os.path.join(dirname, 'sub')
os.mkdir(dirname2)
fname2 = os.path.join(dirname2, 'deleteme.log')
open(fname2, 'w').close()
fbak = os.path.join(dirname, 'deleteme.bak')
open(fbak, 'w').close()
self.assertExists(fname1)
self.assertExists(fname2)
self.assertExists(fbak)
create_sub_key(subkey)
self.assertTrue(detect_registry_key(KEYFULL))
self.assertTrue(detect_registry_key('HKCU\\%s' % subkey))
return dirname, fname1, fname2, fbak
def ini2cleaner(self, body, do_next=True):
"""Write a minimal Winapp2.ini"""
ini = open(self.ini_fn, 'w')
ini.write('[someapp]\n')
ini.write('LangSecRef=3021\n')
ini.write(body)
ini.write('\n')
ini.close()
self.assertExists(self.ini_fn)
if do_next:
return next(Winapp(self.ini_fn).get_cleaners())
else:
return Winapp(self.ini_fn).get_cleaners()
@common.skipUnlessWindows
def test_fake(self):
"""Test with fake file"""
# reuse this path to store a winapp2.ini file in
(ini_h, self.ini_fn) = tempfile.mkstemp(
suffix='.ini', prefix='winapp2')
os.close(ini_h)
# a set of tests
# this map explains what each position in the test tuple means
# 0=line to write directly to winapp2.ini
# 1=filename1 to place in fake environment (default=deleteme.log)
# 2=auto-hide before cleaning
# 3=dirname exists after clea
|
drogenlied/qudi
|
core/base.py
|
Python
|
gpl-3.0
| 10,150
| 0.001872
|
# -*- coding: utf-8 -*-
"""
This file contains the Qudi module base class.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import logging
import qtpy
from qtpy import QtCore
from .FysomAdapter import Fysom # provides a final state machine
from collections import OrderedDict
import os
import sys
class Base(QtCore.QObject, Fysom):
"""
Base class for all loadable modules
* Ensure that the program will not die during the load of modules in any case,
and therefore do nothing!!!
* Initialize modules
* Provides a self identification of the used module
* Output redirection (instead of print)
* Provides a self de-initialization of the used module
* Reload the module with code change
|
s
* Get your own configuration (for save)
* Get name of status variables
* Get status variables
* Reload module data (from saved variables)
"""
sigStateChanged = QtCore.Signal(object) # (module name,
|
state change)
_modclass = 'base'
_modtype = 'base'
_in = dict()
_out = dict()
def __init__(self, manager, name, config=None, callbacks=None, **kwargs):
""" Initialise Base class object and set up its state machine.
        @param object self: the object being initialised
        @param object manager: the manager object that this module belongs to
@param str name: unique name for this object
@param dict configuration: parameters from the configuration file
@param dict callbacks: dictionary specifying functions to be run
on state machine transitions
"""
if config is None:
config = {}
if callbacks is None:
callbacks = {}
default_callbacks = {
'onactivate': self.on_activate,
'ondeactivate': self.on_deactivate
}
default_callbacks.update(callbacks)
# State machine definition
        # the abbreviations for the event list are the following:
# name: event name,
# src: source state,
# dst: destination state
_baseStateList = {
'initial': 'deactivated',
'events': [
{'name': 'activate', 'src': 'deactivated', 'dst': 'idle'},
{'name': 'deactivate', 'src': 'idle', 'dst': 'deactivated'},
{'name': 'deactivate', 'src': 'running', 'dst': 'deactivated'},
{'name': 'run', 'src': 'idle', 'dst': 'running'},
{'name': 'stop', 'src': 'running', 'dst': 'idle'},
{'name': 'lock', 'src': 'idle', 'dst': 'locked'},
{'name': 'lock', 'src': 'running', 'dst': 'locked'},
{'name': 'block', 'src': 'idle', 'dst': 'blocked'},
{'name': 'block', 'src': 'running', 'dst': 'blocked'},
{'name': 'locktoblock', 'src': 'locked', 'dst': 'blocked'},
{'name': 'unlock', 'src': 'locked', 'dst': 'idle'},
{'name': 'unblock', 'src': 'blocked', 'dst': 'idle'},
{'name': 'runlock', 'src': 'locked', 'dst': 'running'},
{'name': 'runblock', 'src': 'blocked', 'dst': 'running'}
],
'callbacks': default_callbacks
}
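        # Example transitions from the table above: activate() moves 'deactivated'
        # to 'idle', run() moves 'idle' to 'running', and lock() moves either
        # 'idle' or 'running' to 'locked'.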
# Initialise state machine:
if qtpy.PYQT4 or qtpy.PYSIDE:
QtCore.QObject.__init__(self)
Fysom.__init__(self, _baseStateList)
else:
super().__init__(cfg=_baseStateList, **kwargs)
# add connection base
self.connector = OrderedDict()
self.connector['in'] = OrderedDict()
for con in self._in:
self.connector['in'][con] = OrderedDict()
self.connector['in'][con]['class'] = self._in[con]
self.connector['in'][con]['object'] = None
self.connector['out'] = OrderedDict()
for con in self._out:
self.connector['out'][con] = OrderedDict()
self.connector['out'][con]['class'] = self._out[con]
self._manager = manager
self._name = name
self._configuration = config
self._statusVariables = OrderedDict()
# self.sigStateChanged.connect(lambda x: print(x.event, x.fsm._name))
def __getattr__(self, name):
"""
Attribute getter.
We'll reimplement it here because otherwise only __getattr__ of the
first base class (QObject) is called and the second base class is
never looked up.
Here we look up the first base class first and if the attribute is
not found, we'll look into the second base class.
"""
try:
return QtCore.QObject.__getattr__(self, name)
except AttributeError:
pass
return Fysom.__getattr__(self, name)
@property
def log(self):
"""
Returns a logger object
"""
return logging.getLogger("{0}.{1}".format(
self.__module__, self.__class__.__name__))
@QtCore.Slot(result=bool)
def _wrap_activation(self):
self.log.debug('Activation in thread {0}'.format(QtCore.QThread.currentThreadId()))
try:
self.activate()
except:
self.log.exception('Error during activation')
return False
return True
@QtCore.Slot(result=bool)
def _wrap_deactivation(self):
self.log.debug('Deactivation in thread {0}'.format(QtCore.QThread.currentThreadId()))
try:
self.deactivate()
except:
self.log.exception('Error during activation:')
return False
return True
def on_activate(self, e):
""" Method called when module is activated. If not overridden
this method returns an error.
@param object e: Fysom state change descriptor
"""
self.log.error('Please implement and specify the activation method '
'for {0}.'.format(self.__class__.__name__))
def on_deactivate(self, e):
""" Method called when module is deactivated. If not overridden
this method returns an error.
@param object e: Fysom state change descriptor
"""
self.log.error('Please implement and specify the deactivation '
                       'method for {0}.'.format(self.__class__.__name__))
# Do not replace these in subclasses
def onchangestate(self, e):
""" Fysom callback for state transition.
@param object e: Fysom state transition description
"""
self.sigStateChanged.emit(e)
def getStatusVariables(self):
""" Return a dict of variable names and their content representing
the module state for saving.
@return dict: variable names and contents.
"""
return self._statusVariables
def setStatusVariables(self, variableDict):
""" Give a module a dict of variable names and their content
representing the module state.
@param OrderedDict dict: variable names and contents.
"""
if not isinstance(variableDict, (dict, OrderedDict)):
self.log.error('Did not pass a dict or OrderedDict to '
'setStatusVariables in {0}.'.format(
self.__class__.__name__))
return
se
|
blond-admin/BLonD
|
blond/toolbox/parameter_scaling.py
|
Python
|
gpl-3.0
| 21,008
| 0.004665
|
# coding: utf8
# Copyright 2014-2020 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Scaling of longitudinal beam and machine parameters, with user interface.**
:Authors: **Konstantinos Iliakis**, **Helga Timko**
'''
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qt import QButtonGroup, QHBoxLayout, QGroupBox
from scipy import integrate
from scipy.constants import m_p, e, c
import numpy as np
# Machine-dependent parameters [SI-units] -------------------------------------
set_ups = {'PSB': '0',
'CPS': '1',
'SPS, Q20': '2', 'SPS, Q22': '3', 'SPS, Q26': '4',
'LHC, -2016': '5', 'LHC, 2017-': '6'}
gamma_ts = {'0': 4.0767,
'1': np.sqrt(37.2),
'2': 18., '3': 20., '4': 22.83,
'5': 55.759505, '6': 53.8}
harmonics = {'0': 1,
'1': 21,
'2': 4620, '3': 4620, '4': 4620,
'5': 35640, '6': 35640}
circumferences = {'0': 2*np.pi*25,
'1': 2*np.pi*100.,
'2': 2*np.pi*1100.009, '3': 2*np.pi*1100.009, '4': 2*np.pi*1100.009,
'5': 26658.883, '6': 26658.883}
energies_fb = {'0': (160.e6 + m_p*c**2/e),
'1': (2.0e9 + m_p*c**2/e),
'2': 25.92e9, '3': 25.92e9, '4': 25.92e9,
'5': 450.e9, '6': 450.e9}
energies_ft = {'0': (2.0e9 + m_p*c**2/e),
'1': 25.92e9,
'2': 450.e9, '3': 450.e9, '4': 450.e9,
'5': 6.5e12, '6': 6.5e12}
# Machine-dependent parameters [SI-units] -------------------------------------
class ParameterScaling(object):
@property
def phi_b(self):
return self.omega_rf*self.tau/2.
@property
def delta_b(self):
return self.dE_b/(self.beta_sq*self.energy)
@property
def dE_b(self):
return np.sqrt(self.beta_sq*self.energy*self.voltage*(1 -
np.cos(self.phi_b)) / (np.pi*self.harmonic*self.eta_0))
@property
def integral(self):
return integrate.quad(lambda x: np.sqrt(2.*(np.cos(x) -
np.cos(self.phi_b))), 0, self.phi_b)[0]
@property
def emittance(self):
return 4.*self.energy*self.omega_s0*self.beta_sq*self.integral / \
(self.omega_rf**2*self.eta_0)
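    # Editor's note (hedged restatement of the math implemented above, for a
    # single-RF stationary bucket):
    #   phi_b     = omega_rf * tau / 2
    #   dE_b      = sqrt( beta^2 E V (1 - cos(phi_b)) / (pi h eta_0) )
    #   emittance = 4 E omega_s0 beta^2 / (omega_rf^2 eta_0)
    #               * integral_0^phi_b sqrt(2 (cos x - cos phi_b)) dx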
def relativistic_quantities(self):
self.momentum = np.sqrt(self.energy**2 - self.mass**2)
self.tb1.append(" Synchronous momentum: "+
np.str(self.momentum)+" eV")
self.kinetic_energy = self.energy - self.mass
self.tb1.append(" Synchronous kinetic energy: "+
np.str(self.kinetic_energy)+" eV")
self.gamma = self.energy/self.mass
self.tb1.append(" Synchronous relativistic gamma: "+
np.str(self.gamma)+"")
self.beta = np.sqrt(1. - 1./self.gamma**2)
self.tb1.append(" Synchronous relativistic beta: "+
np.str(self.beta)+"")
self.beta_sq = self.beta ** 2
self.tb1.append(" Synchronous relativistic beta squared: "+
np.str(self.beta_sq)+"\n")
def frequencies(self):
self.t_rev = self.circumference/(self.beta*c)
self.tb1.append(" Revolution period: "+
np.str(self.t_rev * 1.e6)+" us")
self.f_rev = 1./self.t_rev
self.tb1.append(" Revolution frequency: "+
np.str(self.f_rev)+" Hz")
self.omega_rev = 2.*np.pi*self.f_rev
self.tb1.append(" Angular revolution frequency: "+
np.str(self.omega_rev)+" 1/s")
self.f_RF = self.harmonic*self.f_rev
self.tb1.append(" RF frequency: "+np.str(self.f_RF*1.e-6)+" MHz")
self.omega_rf = 2.*np.pi*self.f_RF
self.tb1.append(" Angular RF frequency: "+
np.str(self.omega_rf)+" 1/s\n")
def tun
|
e(self):
self.eta_0 = np.fabs(1./self.gamma_t**2 - 1./self.gamma**2)
self.tb1.append(" Slippage factor (zeroth order): "+
np.str(self.eta_0)+"")
self.Q_s0 = np.sqrt(self.harmonic*self.voltage*self.eta_0 /
|
(2.*np.pi*self.beta_sq*self.energy))
self.tb1.append(" Central synchrotron tune: "+np.str(self.Q_s0)+"")
self.f_s0 = self.Q_s0*self.f_rev
self.tb1.append(" Central synchrotron frequency: "+
np.str(self.f_s0)+"")
self.omega_s0 = 2.*np.pi*self.f_s0
self.tb1.append(" Angular synchrotron frequency: "+
np.str(self.omega_s0)+" 1/s\n")
def bucket_parameters(self):
self.tb1.append("Bucket parameters assume: single RF, stationary case, and no intensity effects.\n")
self.bucket_area = 8.*np.sqrt(2.*self.beta_sq*self.energy*self.voltage /
(np.pi*self.harmonic*self.eta_0)) / self.omega_rf
self.tb1.append(" Bucket area: "+np.str(self.bucket_area)+" eVs")
self.dt_max = 0.5*self.t_rev/self.harmonic
self.tb1.append(" Half of bucket length: "+
np.str(self.dt_max*1.e9)+" ns")
self.dE_max = np.sqrt(2.*self.beta**2*self.energy*self.voltage /
(np.pi*self.eta_0*self.harmonic))
self.tb1.append(" Half of bucket height: "+
np.str(self.dE_max*1.e-6)+" MeV")
self.delta_max = self.dE_max/(self.beta_sq*self.energy)
self.tb1.append(" In relative momentum offset: "+
np.str(self.delta_max)+"\n")
def emittance_from_bunch_length(self, four_sigma_bunch_length):
self.tau = four_sigma_bunch_length
if self.tau >= 2.*self.dt_max:
self.tb1.append("Chosen bunch length too large for this bucket. Aborting!")
raise RuntimeError("Chosen bunch length too large for this bucket. Aborting!")
self.tb1.append("Calculating emittance of 4-sigma bunch length: "+
np.str(self.tau*1.e9)+" ns")
self.tb1.append(" Emittance contour in phase: "+
np.str(self.phi_b)+" rad")
self.tb1.append(" Emittance contour in relative momentum: "+
np.str(self.delta_b)+"")
self.tb1.append(" Emittance contour in energy offset: "+
np.str(self.dE_b*1.e-6)+" MeV")
self.tb1.append(" R.m.s. bunch length is: "+
np.str(self.tau*c/4*100)+" cm")
self.tb1.append(" R.m.s. energy spread is: "+
np.str(0.5*self.dE_b/self.kinetic_energy)+"")
self.tb1.append(" Longitudinal emittance is: "+
np.str(self.emittance)+" eVs\n")
def bunch_length_from_emittance(self, emittance):
self.emittance_aim = emittance
if self.emittance_aim >= self.bucket_area:
self.tb1.append("Chosen emittance too large for this bucket. Aborting!")
raise RuntimeError("Chosen emittance too large for this bucket. Aborting!")
self.tb1.append("Calculating 4-sigma bunch length for an emittance of "
+np.str(self.emittance_aim)+" eVs")
# Make a guess, iterate to get closer
self.tau = self.dt_max/2.
while (np.fabs((self.emittance - self.emittance_aim)
/self.emittance_aim) > 0.001):
self.tau *= np.sqrt(self.emittance_aim/self.emittance)
|
joachimmetz/plaso
|
plaso/parsers/aws_elb_access.py
|
Python
|
apache-2.0
| 14,893
| 0.00235
|
# -*- coding: utf-8 -*-
"""Parser for AWS ELB access logs.
This parser is based on the log format documented at
https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html
Note:
The AWS documentation is not clear about the meaning
of the "target_port_list" field. The assumption is
that it refers to a list of possible backend instances'
  IP addresses that could receive the client's request.
  This parser stores the "target_port_list" data in the
"destination_list" attribute of an EventData object.
"""
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import manager
from plaso.parsers import text_parser
class AWSELBEventData(events.EventData):
"""AWS Elastic Load Balancer access log event data
Attributes:
request_type (str): The type of request or connection.
resource_identifier (str): The resource ID of the load balancer.
source_ip_address (str): The IP address of the requesting source.
source_port (int): The port of the requesting source.
destination_ip_address (str): The IP address of the destination
that processed this request.
destination_port (int): The port of the destination that processed
this request.
request_processing_time (str): The total duration from
the time the load balancer received the request until the
time it sent the request to a destination.
destination_processing_time (str): The total duration from
the time the load balancer sent the request to a destination until
the destination started to send the response headers.
response_processing_time (str): The total processing duration.
destination_status_code (int): The status code of the response
from the destination.
received_bytes (int): The size of the request, in bytes, received from
the source.
sent_bytes (int): The size of the response, in bytes, sent to the source.
user_agent (str): A User-Agent string.
ssl_cipher (str): The SSL cipher of the HTTPS listener.
ssl_protocol (str): The SSL protocol of the HTTPS listener.
destination_group_arn (str): The Amazon Resource Name (ARN) of the
destination group.
trace_identifier (str): The contents of the X-Amzn-Trace-Id header.
domain_name (str): The SNI domain provided by the
source during the TLS handshake.
chosen_cert_arn (str): The ARN of the certificate
presented to the source.
matched_rule_priority (int): The priority value of the rule that
matched the request.
actions_executed (str): The actions taken when processing the request.
redirect_url (str): The URL of the redirect destination.
error_reason (str): The error reason code, enclosed in
double quotes.
destination_list (str): A space-delimited list of IP addresses
and ports for the desti
|
nations that processed this request.
destination_status_code_list (str): A space-delimited list of status codes
|
.
classification (str): The classification for desync mitigation.
classification_reason (str): The classification reason code.
"""
DATA_TYPE = 'aws:elb:access'
def __init__(self):
"""Initializes event data."""
super(AWSELBEventData, self).__init__(data_type=self.DATA_TYPE)
self.request_type = None
self.resource_identifier = None
self.source_ip_address = None
self.source_port = None
self.destination_ip_address = None
self.destination_port = None
self.request_processing_time = None
self.destination_processing_time = None
self.response_processing_time = None
self.elb_status_code = None
self.destination_status_code = None
self.received_bytes = None
self.sent_bytes = None
self.request = None
self.user_agent = None
self.ssl_cipher = None
self.ssl_protocol = None
self.destination_group_arn = None
self.trace_identifier = None
self.domain_name = None
self.chosen_cert_arn = None
self.matched_rule_priority = None
self.actions_executed = None
self.redirect_url = None
self.error_reason = None
self.destination_list = None
self.destination_status_code_list = None
self.classification = None
self.classification_reason = None
class AWSELBParser(text_parser.PyparsingSingleLineTextParser):
"""Parses an AWS ELB access log file."""
NAME = 'aws_elb_access'
DATA_FORMAT = 'AWS ELB Access log file'
MAX_LINE_LENGTH = 3000
_ENCODING = 'utf-8'
BLANK = pyparsing.Literal('"-"')
_WORD = pyparsing.Word(pyparsing.printables) | BLANK
_QUOTE_INTEGER = (
pyparsing.OneOrMore('"') + text_parser.PyparsingConstants.INTEGER | BLANK)
_INTEGER = text_parser.PyparsingConstants.INTEGER | BLANK
_FLOAT = pyparsing.Word(pyparsing.nums + '.')
_PORT = pyparsing.Word(pyparsing.nums, max=6).setParseAction(
text_parser.ConvertTokenToInteger) | BLANK
_CLIENT_IP_ADDRESS_PORT = pyparsing.Group(
text_parser.PyparsingConstants.IP_ADDRESS('source_ip_address') +
pyparsing.Suppress(':') + _PORT('source_port') | BLANK)
_DESTINATION_IP_ADDRESS_PORT = pyparsing.Group(
text_parser.PyparsingConstants.IP_ADDRESS('destination_ip_address') +
pyparsing.Suppress(':') + _PORT('destination_port') | BLANK)
_DATE_TIME_ISOFORMAT_STRING = pyparsing.Combine(
pyparsing.Word(pyparsing.nums, exact=4) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('-') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('T') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('.') +
pyparsing.Word(pyparsing.nums, exact=6) + pyparsing.Literal('Z'))
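  # Editor's note: the Combine above matches ISO 8601 timestamps with
  # microsecond precision and a trailing 'Z', for example:
  #   2020-01-02T03:04:05.123456Z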
# A log line is defined as in the AWS ELB documentation
_LOG_LINE = (
_WORD.setResultsName('request_type') +
_DATE_TIME_ISOFORMAT_STRING.setResultsName('time') +
_WORD.setResultsName('resource_identifier') +
_CLIENT_IP_ADDRESS_PORT.setResultsName('source_ip_port') +
_DESTINATION_IP_ADDRESS_PORT.setResultsName('destination_ip_port') +
_FLOAT.setResultsName('request_processing_time') +
_FLOAT.setResultsName('destination_processing_time') +
_FLOAT.setResultsName('response_processing_time') +
_INTEGER.setResultsName('elb_status_code') +
_INTEGER.setResultsName('destination_status_code') +
_INTEGER.setResultsName('received_bytes') +
_INTEGER.setResultsName('sent_bytes') +
pyparsing.quotedString.setResultsName('request')
.setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName('user_agent')
.setParseAction(pyparsing.removeQuotes) +
_WORD.setResultsName('ssl_cipher') +
_WORD.setResultsName('ssl_protocol') +
_WORD.setResultsName('destination_group_arn') +
_WORD.setResultsName('trace_identifier') +
pyparsing.quotedString.setResultsName(
'domain_name').setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
'chosen_cert_arn').setParseAction(pyparsing.removeQuotes) +
_INTEGER.setResultsName('matched_rule_priority') +
_DATE_TIME_ISOFORMAT_STRING.setResultsName('request_creation_time') +
pyparsing.quotedString.setResultsName(
'actions_executed').setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
'redirect_url').setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
'error_reason').setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
'destination_list').setParseAction(pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
'destination_status_code_list').setParseAction(
pyparsing.removeQuotes) +
pyparsing.quotedString.setResultsName(
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractStealtranslationHomeBlog.py
|
Python
|
bsd-3-clause
| 108
| 0.055556
|
def
|
extractStealtranslationHomeBlog(item):
'''
Parser fo
|
r 'stealtranslation.home.blog'
'''
return None
|
robotpilot/robomongo
|
src/third-party/mongodb/buildscripts/bcp.py
|
Python
|
gpl-3.0
| 824
| 0.053398
|
import utils
import os
import shutil
import sys
def go( boost_root ):
OUTPUT = "src/third_party/boost"
if os.path.exists( OUTPUT ):
shutil.rmtree( OUTPUT )
cmd = [ "bcp" , "--scan" , "--boost=%s" % boost_root ]
src = utils.getAllSourceFiles()
cmd += src
cmd.append( OUTPUT )
if not os.path.exists( OUTPUT ):
os.makedirs( OUT
|
PUT )
res = utils.execsys( cmd )
out = open( OUTPUT + "/bcp-out.txt" , 'w' )
out.write( re
|
s[0] )
out.close()
out = open( OUTPUT + "/notes.txt" , 'w' )
out.write( "command: " + " ".join( cmd ) )
out.close()
print( res[1] )
if __name__ == "__main__":
if len(sys.argv) == 1:
print( "usage: python %s <boost root directory>" % sys.argv[0] )
sys.exit(1)
go( sys.argv[1] )
|
botlabio/autonomio
|
autonomio/save_model_as.py
|
Python
|
mit
| 1,376
| 0.001453
|
def save_model_as(X, columns, model, save_model, flatten):
'''Model Saver
WHAT: Saves a trained model so it can be loaded later
for predictions by predictor().
'''
model_json = model.to_json()
with open(save_model+".json", "w") as json_file:
json_file.write(model_json)
model.save_weights(save_model+".h5")
print("Model" + " " + save_model + " " + "have been saved.")
temp = ""
f = open(save_model+".x", "w+")
# for a range of columns (two ints)
if type(X) == list:
if len(X) == 2:
if type(X[0]) == int:
for i in range(X[0], X[1]):
try:
temp += columns[i] + " "
except:
pass
# for multiple column index
if type(X) == list:
if len(X) > 2:
if type(X[0]) == int:
for i in X:
temp += columns[i] + " "
# for multiple column labels
if type(X) == list:
if type(X[0]) == str:
|
for i in X:
temp += i+" "
temp = temp[:-1]
# for an integer as column name (int)
if type(X) == int:
temp = columns[X]
# for a single column label which contains string values
if type(X) == str:
temp = X
temp += " "+str(flatten)
f.w
|
rite(temp)
f.close()
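
# Editor's note (hypothetical companion snippet, not part of the original
# module): the ".x" file written above holds the space-separated feature
# columns with the flatten flag appended as the last token, so a loader
# could recover them roughly like this:
#
#   with open(save_model + ".x") as f:
#       tokens = f.read().split(" ")
#   flatten, columns = tokens[-1], tokens[:-1]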
|
PaulaEstrella/MTTT-PyQT
|
MTTTCore.py
|
Python
|
gpl-3.0
| 15,580
| 0.005969
|
"""@brief MTTT's core commands, stems from the original version created using Gtk https://github.com/roxana-lafuente/MTTT"""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# Machine Translation Training Tool
# Copyright (C) 2016 Roxana Lafuente <roxana.lafuente@gmail.com>
# Miguel Lemos <miguelemosreverte@gmail.com>
# Paula Estrella <pestrella@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without ev
|
en the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURP
|
OSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def install_and_import(package):
import importlib
try:
importlib.import_module(package)
except ImportError:
try:
import pip
except ImportError:
print "no pip"
os.system('python get_pip.py')
finally:
import pip
pip.main(['install', package])
finally:
globals()[package] = importlib.import_module(package)
#os is one of the modules that I know comes with 2.7, no questions asked.
import os
#these other ones I a am not so sure of. Thus the install function.
install_and_import("requests")
install_and_import("subprocess")
install_and_import("json")
install_and_import("sys")
install_and_import("time")
install_and_import("shutil")
install_and_import("urlparse")
install_and_import("itertools")
from commands import *
from files_processing import *
from constants import moses_dir_fn
from Ui_mosesDialog import MosesDialog
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action='VisualsMenu'>
<menu action='Visuals'>
<menuitem action='metro'/>
<menuitem action='paper'/>
<separator />
<menuitem action='lights_on_option'/>
</menu>
</menu>
</menubar>
</ui>
"""
class MTTTCore():
def __init__(self):
# Recognize OS
if os.name == 'posix': # Linux
self.is_linux, self.is_windows = True, False
elif os.name == 'nt': # Windows
self.is_linux, self.is_windows = False, True
else:
print "Unknown OS"
exit(1)
# Check Moses Config file.
self.moses_dir = ""
try:
f = open(moses_dir_fn, 'r')
self.moses_dir = f.read()
f.close()
        except (IOError, OSError):
# File does not exist.
self.moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
finally:
# File content is wrong
if not self.is_moses_dir_valid(self.moses_dir):
                self.moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
self.saved_absolute_path = os.path.abspath("saved")
self.saved_relative_filepath = "./saved"
if not os.path.exists(self.saved_absolute_path):
os.makedirs(self.saved_absolute_path)
# Init
self.source_lang = None
self.target_lang = None
self.output_text= None
self.cwd = os.getcwd()
def is_moses_dir_valid(self, directory):
is_valid = True
if directory == "":
is_valid = False # Empty string
elif not os.path.exists(directory):
is_valid = False # Directory does not exist
else:
# Check if dir exists but does not contain moses installation
is_valid = self._check_moses_installation(directory)
return is_valid
def _check_moses_installation(self, directory):
# TODO: TRY catch OSError when permission denied!!
file_content = [f for f in os.listdir(directory)]
moses_files = ["/scripts/tokenizer/tokenizer.perl",
"/scripts/recaser/truecase.perl",
"/scripts/training/clean-corpus-n.perl",
"/bin/lmplz",
"/bin/build_binary",
"/scripts/training/train-model.perl",
"/bin/moses"
]
if self.is_windows:
moses_files = [f.replace("/", "\\")
for f in moses_files]
moses_files = [f + ".exe"
for f in moses_files
if "/bin" in f]
is_valid = True
for mfile in moses_files:
is_valid = is_valid and os.path.isfile(directory + mfile)
return is_valid
def get_moses_dir(self):
"""
Gets Moses directory.
"""
moses = MosesDialog()
self.moses_dir = moses.detect()
return self.moses_dir
def _prepare_corpus(self, output_text, source_lang, target_lang, st_train, tt_train, lm_text):
self.output_text = str(output_text)
self.source_lang = str(source_lang)
self.target_lang = str(target_lang)
self.lm_text = str(lm_text)
self.tt_train = str(tt_train)
self.st_train = str(st_train)
output_directory = adapt_path_for_cygwin(self.is_windows, self.output_text)
return_text = ""
if output_directory is not None:
# Change directory to the output_directory.
try:
os.chdir(self.output_text)
except:
# Output directory does not exist.
os.mkdir(self.output_text)
os.chdir(self.output_text)
cmds = []
# 1) Tokenization
# a) Target text
target_tok = generate_input_tok_fn(self.target_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.target_lang,
adapt_path_for_cygwin(self.is_windows,self.tt_train),
target_tok))
# b) Source text
source_tok = generate_input_tok_fn(self.source_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.st_train),
source_tok))
# c) Language model
lm_tok = generate_lm_tok_fn(output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.lm_text),
lm_tok))
# 2) Truecaser training
# a) Target text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok))
# b) Source text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
source_tok))
# c) Language model
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
lm_tok))
# 3) Truecaser
input_true =
|
openrobotics/openrobotics_thunderbot
|
pyttsx/pyttsx/driver.py
|
Python
|
mit
| 6,982
| 0.000573
|
'''
Proxy for drivers.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import sys
import traceback
import weakref
class DriverProxy(object):
'''
Proxy to a driver implementation.
@ivar _module: Module containing the driver implementation
@type _module: module
@ivar _engine: Reference to the engine that owns the driver
@type _engine: L{engine.Engine}
@ivar _queue: Queue of commands outstanding for the driver
@type _queue: list
@ivar _busy: True when the driver is busy processing a command, False when
not
@type _busy: bool
@ivar _name: Name associated with the current utterance
@type _name: str
@ivar _debug: Debugging output enabled or not
@type _debug: bool
@ivar _iterator: Driver iterator to invoke when in an external run loop
@type _iterator: iterator
'''
def __init__(self, engine, driverName, debug):
'''
Constructor.
@param engine: Reference to the engine that owns the driver
@type engine: L{engine.Engine}
@param driverName: Name of the driver module to use under drivers/ or
None to select the default for the platform
@type driverName: str
@param debug: Debugging output enabled or not
@type debug: bool
'''
if driverName is None:
# pick default driver for common platforms
if sys.platform == 'darwin':
driverName = 'nsss'
elif sys.platform == 'win32':
driverName = 'sapi5'
else:
driverName = 'espeak'
# import driver module
name = 'drivers.%s' % driverName
self._module = __import__(name, globals(), locals(), [driverName])
# build driver instance
self._driver = self._module.buildDriver(weakref.proxy(self))
# initialize refs
self._engine = engine
self._queue = []
self._busy = True
self._name = None
self._iterator = None
self._debug = debug
def __del__(self):
try:
self._driver.destroy()
except (AttributeError, TypeError):
pass
def _push(self, mtd, args, name=None):
'''
Adds a command to the queue.
@param mtd: Method to invoke to process the command
@type mtd: method
@param args: Arguments to apply when invoking the method
@type args: tuple
@param name: Name associated with the command
@type name: str
'''
self._queue.append((mtd, args, name))
self._pump()
def _pump(self):
'''
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
'''
while (not self._busy) and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1])
except Exception, e:
self.notify('error', exception=e)
if self._debug: traceback.print_exc()
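    # Editor's note: _push/_pump form a simple cooperative queue -- commands
    # accumulate while the driver reports busy via setBusy(True), and each
    # setBusy(False) drains queued commands until the driver marks itself
    # busy again.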
def notify(self, topic, **kwargs):
'''
Sends a notification to the engine from the driver.
@param topic: Notification topic
@type topic: str
@param kwargs: Arbitrary keyword arguments
@type kwargs: dict
'''
kwargs['name'] = self._name
self._engine._notify(topic, **kwargs)
def setBusy(self, busy):
'''
Called by the driver to indicate it is busy.
@param busy: True when busy, false when idle
@type busy: bool
'''
self._busy = busy
if not self._busy:
self._pump()
def isBusy(self):
'''
@return: True if the driver is busy, false if not
@rtype: bool
'''
return self._busy
def say(self, text, name):
'''
Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str
'''
self._push(self._driver.say, (text,), name)
def stop(self):
'''
Called by the engine to stop the current utterance and clear the queue
of commands.
'''
# clear queue up to first end loop command
while(True):
try:
mtd, args, name
|
= self._queue[0]
except IndexError:
break
if(mtd == self._engine.endLoop): break
self._queue.p
|
op(0)
self._driver.stop()
def getProperty(self, name):
'''
Called by the engine to get a driver property value.
@param name: Name of the property
@type name: str
@return: Property value
@rtype: object
'''
return self._driver.getProperty(name)
def setProperty(self, name, value):
'''
Called by the engine to set a driver property value.
@param name: Name of the property
@type name: str
@param value: Property value
@type value: object
'''
self._push(self._driver.setProperty, (name, value))
def runAndWait(self):
'''
Called by the engine to start an event loop, process all commands in
the queue at the start of the loop, and then exit the loop.
'''
self._push(self._engine.endLoop, tuple())
self._driver.startLoop()
def startLoop(self, useDriverLoop):
'''
Called by the engine to start an event loop.
'''
if useDriverLoop:
self._driver.startLoop()
else:
self._iterator = self._driver.iterate()
def endLoop(self, useDriverLoop):
'''
Called by the engine to stop an event loop.
'''
self._queue = []
self._driver.stop()
if useDriverLoop:
self._driver.endLoop()
else:
self._iterator = None
self.setBusy(True)
def iterate(self):
'''
Called by the engine to iterate driver commands and notifications from
within an external event loop.
'''
try:
self._iterator.next()
except StopIteration:
pass
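
# Editor's note (hedged usage sketch, assuming an engine has built the proxy
# and the surrounding application owns the event loop): the engine is then
# expected to start the loop without the driver's own loop and pump manually,
# roughly:
#
#   proxy.startLoop(useDriverLoop=False)
#   while app_is_running:          # app_is_running is a hypothetical flag
#       proxy.iterate()            # drains one step of driver work per call
#   proxy.endLoop(useDriverLoop=False)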
|
nkrinner/nova
|
nova/api/openstack/compute/servers.py
|
Python
|
apache-2.0
| 61,851
| 0.00042
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the'
' relevant server API calls such as create, rebuild'
' or rescue, If the hypervisor does not support'
' password injection then the password returned will'
' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
XML_WARNING = False
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
global XML_WARNING
if not XML_WARNING:
LOG.warning(_('XML support has been deprecated and may be removed '
'as early as the Juno release.'))
XML_WARNING = True
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request."""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6", "key_name",
"availability_zone", "min_count", "max_count"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
res_id = server_node.getAttribute('return_reservation_id')
if res_id:
server['return_reservation_id'] = \
strutils.bool_from_string(res_id)
scheduler_hints = self._extract_scheduler_hints(server_node)
if scheduler_hints:
server['OS-SCH-HNT:scheduler_hints'] = scheduler_hin
|
ts
metadata_node = self.find_first_child_named(s
|
erver_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
user_data_node = self.find_first_child_named(server_node, "user_data")
if user_data_node is not None:
server["user_data"] = self.extract_text(user_data_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
# NOTE(vish): this is not namespaced in json, so leave it without a
# namespace for now
block_device_mapping = self._extract_block_device_mapping(server_node)
if block_device_mapping is not None:
|
nttks/edx-platform
|
lms/envs/acceptance.py
|
Python
|
agpl-3.0
| 6,447
| 0.002482
|
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .test import *
from .sauce import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT)
# Output Django logs to a file
import logging
logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR)
# set root logger level
logging.getLogger().setLevel(logging.ERROR)
import os
from random import choice
def seed():
return os.getppid()
# Silence noisy logs
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('codejail.safe_exec', logging.ERROR),
('edx.courseware', logging.ERROR),
('audit', logging.ERROR),
('instructor_task.api_helper', logging.ERROR),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
update_module_store_settings(
MODULESTORE,
doc_store_settings={
'db': 'acceptance_xmodule',
'collection': 'acceptance_modulestore_%s' % seed(),
},
module_store_options={
'fs_root': TEST_ROOT / "data",
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'acceptance_xcontent_%s' % seed(),
}
}
# Set this up so that 'paver lms --settings=acceptance' and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_edx.db",
'TEST_NAME': TEST_ROOT / "db" / "test_edx.db",
'OPTIONS': {
'timeout': 30,
},
'ATOMIC_REQUESTS': True,
}
}
TRACKING_BACKENDS.update({
'mongo': {
'ENGINE': 'track.backends.mongodb.MongoBackend'
}
})
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update({
'mongo': {
'ENGINE': 'eventtracking.backends.mongodb.MongoBackend',
'OPTIONS': {
'database': 'track'
}
}
})
BULK_EMAIL_DEFAULT_FROM_EMAIL = "test@test.org"
# Forums are disabled in test.py to speed up unit tests, but we do not have
# per-test control for lettuce acceptance tests.
# If you are writing an acceptance test that needs the discussion service enabled,
# do not write it in lettuce, but instead write it using bok-choy.
# DO NOT CHANGE THIS SETTING HERE.
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Use the auto_auth workflow for creating user
|
s and logging them in
FEATURE
|
S['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable third-party authentication
FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True
THIRD_PARTY_AUTH = {
"Google": {
"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": "test",
"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": "test"
},
"Facebook": {
"SOCIAL_AUTH_FACEBOOK_KEY": "test",
"SOCIAL_AUTH_FACEBOOK_SECRET": "test"
}
}
# Enable fake payment processing page
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Enable email on the instructor dash
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Don't actually send any requests to Software Secure for student identity
# verification.
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
# HACK
# Setting this flag to false causes imports to not load correctly in the lettuce python files
# We do not yet understand why this occurs. Setting this to true is a stopgap measure
USE_I18N = True
FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = False
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('courseware', 'instructor')
# Lettuce appears to have a bug that causes it to search
# `instructor_task` when we specify the `instructor` app.
# This causes some pretty cryptic errors as lettuce tries
# to parse files in `instructor_task` as features.
# As a quick workaround, explicitly exclude the `instructor_task` app.
LETTUCE_AVOID_APPS = ('instructor_task',)
LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome')
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local')
SELENIUM_GRID = {
'URL': 'http://127.0.0.1:4444/wd/hub',
'BROWSER': LETTUCE_BROWSER,
}
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
# Because an override for where to run will affect which ports to use,
# set these up after the local overrides.
# Configure XQueue interface to use our stub XQueue server
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT),
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or \
FEATURES.get('ENABLE_DASHBOARD_SEARCH') or \
FEATURES.get('ENABLE_COURSE_DISCOVERY'):
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
ANONYMOUS_ID_SECRET_KEY = SECRET_KEY
USERNAME_CIPHER_SECRET_KEY = SECRET_KEY
############################### PIPELINE #######################################
PIPELINE_ENABLED = False
# We want to make sure that any new migrations are run
# see https://groups.google.com/forum/#!msg/django-developers/PWPj3etj3-U/kCl6pMsQYYoJ
MIGRATION_MODULES = {}
|
DLR-SC/DataFinder
|
src/datafinder/gui/admin/datastore_configuration_wizard/gridftp/__init__.py
|
Python
|
bsd-3-clause
| 1,999
| 0.018009
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements controller for GridFTP option pages.
"""
fr
|
om datafinder.gui.admin.datastore_configuration_wizard.gridftp import performance_option_controller
from datafinder.gui.admin.datastore_configuration_wizard.gridftp import security_option_controller
__version__ = "$Revision-Id:$"
|
boos/cppcheck
|
addons/test/util.py
|
Python
|
gpl-3.0
| 1,250
| 0.0008
|
# Helpers for pytest tests
import subprocess
import json
import os
def find_cppcheck_binary():
possible_locations = [
"./cppcheck",
"./build/bin/cppcheck",
r".\bin\cppcheck.exe",
]
for location in possible_locations:
if os.path.exists(location):
break
else:
raise RuntimeError("Could not find cppcheck binary")
return location
def dump_create(fpath, *argv):
cppcheck_binary = find_cppcheck_binary()
cmd = [cppcheck_binary, "--dump", "-DDUMMY", "--quiet", fpath] + list(argv)
p = subprocess.Popen(cmd)
p.communicate()
if p.returncode != 0:
raise OSError("cppcheck returns error code: %d" % p.returncode)
subprocess.Popen(["sync"])
def dump_remove(fpath):
subprocess
|
.Popen(["rm", "-f", fpath + ".dump"])
def convert_json_output(raw_json_strings):
"""Convert raw stdout/stderr cppcheck JSON output to python dict."""
json_output = {}
for line in raw_json_st
|
rings:
try:
json_line = json.loads(line)
# json_output[json_line['errorId']] = json_line
json_output.setdefault(json_line['errorId'], []).append(json_line)
except ValueError:
pass
return json_output
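
# Editor's note: the dict produced above is keyed by cppcheck's errorId and
# collects every matching JSON record in a list, e.g. (illustrative shape):
#   {'nullPointer': [{'errorId': 'nullPointer', ...}, ...],
#    'uninitvar':   [{'errorId': 'uninitvar', ...}, ...]}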
|
dhosterman/hebrew_order_david
|
accounts/admin.py
|
Python
|
mit
| 1,111
| 0.0018
|
from django.contrib import admin
from .models import User
from application.models import (Contact, Personal, Wife, Occupation, Children,
Hod, Committee, UserCommittee, Legal)
# Register your models here.
class ContactInline(admin.StackedInline):
model = Contact
class PersonalInline(admin.StackedInline):
model = Personal
class WifeInline(admin.StackedInline):
model = Wife
class OccupationInline(admin.Stac
|
kedInline):
model = Occupation
class HodInline(admin.StackedInline):
model = Hod
class ChildrenInline(admin.StackedInline):
model = Children
class UserCommitteeInline(admin.StackedInline):
model = UserCommittee
class UserAdmin(admin.ModelAdmin):
inlines = [
ContactInline,
PersonalInline,
WifeInline,
OccupationInline,
HodInline,
ChildrenInline,
UserCommitteeInline
]
class LegalAdmin
|
(admin.ModelAdmin):
model = Legal
admin.site.register(User, UserAdmin)
admin.site.register(Legal, LegalAdmin)
admin.site.site_header = 'Hebrew Order of David Administration'
|
pmghalvorsen/gramps_branch
|
gramps/gen/utils/string.py
|
Python
|
gpl-2.0
| 3,058
| 0.014716
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warrant
|
y of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You
|
should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
String mappings for constants
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib import Person, Citation, FamilyRelType
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
return value
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
#-------------------------------------------------------------------------
#
# Integer to String mappings for constants
#
#-------------------------------------------------------------------------
gender = {
Person.MALE : _("male"),
Person.FEMALE : _("female"),
Person.UNKNOWN : _("gender|unknown"),
}
def format_gender( type):
return gender.get(type[0], _("Invalid"))
conf_strings = {
Citation.CONF_VERY_HIGH : _T_("Very High"),
Citation.CONF_HIGH : _T_("High"),
Citation.CONF_NORMAL : _T_("Normal"),
Citation.CONF_LOW : _T_("Low"),
Citation.CONF_VERY_LOW : _T_("Very Low"),
}
# note that a list /very/ similar to this is in EditCitation._setup_fields
# but that has the glocale's translated values since it is used in the UI
family_rel_descriptions = {
FamilyRelType.MARRIED : _("A legal or common-law relationship "
"between a husband and wife"),
FamilyRelType.UNMARRIED : _("No legal or common-law relationship "
"between man and woman"),
FamilyRelType.CIVIL_UNION : _("An established relationship between "
"members of the same sex"),
FamilyRelType.UNKNOWN : _("Unknown relationship between a man "
"and woman"),
FamilyRelType.CUSTOM : _("An unspecified relationship between "
"a man and woman"),
}
data_recover_msg = _('The data can only be recovered by Undo operation '
'or by quitting with abandoning changes.')
|
DedMemez/ODS-August-2017
|
safezone/DistributedFindFour.py
|
Python
|
apache-2.0
| 35,125
| 0.001964
|
# toontown.safezone.DistributedFindFour
from panda3d.core import BitMask32, CollideMask, CollisionHandler, CollisionHandlerQueue, CollisionNode, CollisionRay, CollisionSphere, CollisionTube, Lens, NodePath, TextNode, Vec3, Vec4
from direct.distributed.ClockDelta import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
from TrolleyConstants import *
from direct.gui.DirectGui import *
from toontown.toonbase import TTLocalizer
from direct.distributed import DistributedNode
from direct.distributed.ClockDelta import globalClockDelta
from ChineseCheckersBoard import ChineseCheckersBoard
from direct.fsm import ClassicFSM, State
from direct.fsm import StateData
from toontown.toonbase.ToontownTimer import ToontownTimer
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from otp.otpbase import OTPGlobals
from direct.showbase import PythonUtil
import random
class DistributedFindFour(DistributedNode.DistributedNode):
def __init__(self, cr):
NodePath.__init__(self, 'DistributedFindFour')
DistributedNode.DistributedNode.__init__(self, cr)
self.cr = cr
self.reparentTo(render)
self.boardNode = loader.loadModel('phase_6/models/golf/findfour_game')
self.boardNode.reparentTo(self)
        self.board = [[0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0]]
self.exitButton = None
self.inGame = False
self.waiting = True
self.startButton = None
self.playerNum = None
self.turnText = None
self.isMyTurn = False
self.wantTimer = True
self.leaveButton = None
self.screenText = None
self.turnText = None
self.exitButton = None
self.numRandomMoves = 0
self.blinker = Sequence()
self.playersTurnBlinker = Sequence()
self.yourTurnBlinker = Sequence()
self.winningSequence = Sequence()
self.moveSequence = Sequence()
self.moveList = []
self.mySquares = []
self.playerSeats = None
self.moveCol = None
self.move = None
self.accept('mouse1', self.mouseClick)
self.traverser = base.cTrav
self.pickerNode = CollisionNode('mouseRay')
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(BitMask32(4096))
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.myHandler = CollisionHandlerQueue()
self.traverser.addCollider(self.pickerNP, self.myHandler)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
        self.upButton = self.buttonModels.find('**/InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
self.clockNode = ToontownTimer()
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.setScale(0.3)
self.clockNode.hide()
self.tintConstant = Vec4(0.25, 0.25, 0.25, 0)
self.ghostConstant = Vec4(0, 0, 0, 0.5)
self.knockSound = loader.loadSfx('phase_5/audio/sfx/GUI_knock_1.ogg')
self.clickSound = loader.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.ogg')
self.moveSound = loader.loadSfx('phase_6/audio/sfx/CC_move.ogg')
self.accept('stoppedAsleep', self.handleSleep)
from direct.fsm import ClassicFSM, State
self.fsm = ClassicFSM.ClassicFSM('ChineseCheckers', [State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, ['playing', 'gameOver']), State.State('playing', self.enterPlaying, self.exitPlaying, ['gameOver']), State.State('gameOver', self.enterGameOver, self.exitGameOver, ['waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
startLoc = self.boardNode.find('**/locators')
self.locatorList = list(startLoc.getChildren())
self.startingPositions = self.locatorList.pop(0)
self.startingPositions = self.startingPositions.getChildren()
instancePiece = self.boardNode.find('**/pieces')
tempList = []
for x in xrange(7):
self.startingPositions[x].setTag('StartLocator', '%d' % x)
collNode = CollisionNode('startpicker%d' % x)
collNode.setIntoCollideMask(BitMask32(4096))
tempList.append(self.startingPositions[x].attachNewNode(collNode))
tempList[x].node().addSolid(CollisionTube(0, 0, 0.23, 0, 0, -0.23, 0.2))
for z in self.startingPositions:
y = instancePiece.copyTo(z)
for val in y.getChildren():
val.hide()
tempList = []
for x in xrange(42):
self.locatorList[x].setTag('GamePeiceLocator', '%d' % x)
collNode = CollisionNode('startpicker%d' % x)
collNode.setIntoCollideMask(BitMask32(4096))
tempList.append(self.locatorList[x].attachNewNode(collNode))
tempList[x].node().addSolid(CollisionSphere(0, 0, 0, 0.2))
for z in self.locatorList:
y = instancePiece.copyTo(z)
for val in y.getChildren():
val.hide()
dummyHide = instancePiece.getParent().attachNewNode('DummyHider')
instancePiece.reparentTo(dummyHide)
dummyHide.hide()
return
def setName(self, name):
self.name = name
def announceGenerate(self):
DistributedNode.DistributedNode.announceGenerate(self)
if self.table.fsm.getCurrentState().getName() != 'observing':
if base.localAvatar.doId in self.table.tableState:
self.seatPos = self.table.tableState.index(base.localAvatar.doId)
if self.seatPos <= 2:
for x in self.startingPositions:
x.setH(0)
for x in self.locatorList:
x.setH(0)
else:
for x in self.startingPositions:
x.setH(180)
for x in self.locatorList:
x.setH(180)
self.moveCameraForGame()
else:
self.seatPos = self.table.seatBumpForObserve
if self.seatPos > 2:
for x in self.startingPositions:
x.setH(180)
for x in self.locatorList:
x.setH(180)
self.moveCameraForGame()
def handleSleep(self, task = None):
if self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.exit
|
ButtonPushed()
|
def setTableDoId(self, doId):
self.tableDoId = doId
self.table = self.cr.doId2do[doId]
self.table.setTimerFunc(self.startButtonPushed)
self.fsm.enterInitialState()
self.table.setGameDoId(self.doId)
def disable(self):
DistributedNode.DistributedNode.disable(self)
if self.leaveButton:
self.leaveButton.destroy()
            self.leaveButton = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
self.fsm = None
taskMgr.remove('playerTurnTask')
self.ignoreAll()
return
|
yw374cornell/e-mission-server
|
emission/storage/timeseries/format_hacks/move_filter_field.py
|
Python
|
bsd-3-clause
| 2,215
| 0.006321
|
# For some reason, probably because we were trying to serialize the default
# object, we put the "filter" field into the metadata. But the filter doesn't
# make sense for data other than location, so it doesn't seem like it should be
# in the metadata. Putting it into the metadata also means that it is not
# accessible as part of the data frame (although maybe we should put all
# metadata into the data frame).
# So this simple script moves the filter from the metadata into the data for
# location entries and removes it for all other entries
import logging
import emission.core.get_database as edb
def get_curr_key(entry):
return entry["metadata"]["key"]
def is_location_entry(entry):
curr_key = get_curr_key(entry)
return curr_key == "background/location" or curr_key == "background/filtered_location"
def move_all_filters_to_data():
tsdb = edb.get_timeseries_db()
for entry in tsdb.find():
if "filter" in entry["metadata"]:
curr_filter = entry["metadata"]["filter"]
if is_location_entry(entry):
entry["data"]["filter"] = curr_filter
logging.debug("for entry %s
|
, found key %s, moved filter %s into data" %
(entry["_id"], get_curr_key(entry), curr_filter))
# For all cases, including the location one, we want to delete t
|
he filter from metadata
del entry["metadata"]["filter"]
tsdb.save(entry)
logging.debug("for entry %s, for key %s, deleted filter %s from metadata" %
(entry["_id"], get_curr_key(entry), curr_filter))
else:
pass
# logging.warning("No filter found for entry %s, skipping" % entry)
if "filter" not in entry["data"] and is_location_entry(entry):
# This must be an entry from before the time that we started sending
# entries to the server. At that time, we only sent time entries,
# so set it to time in this case
entry["data"]["filter"] = "time"
logging.debug("No entry found in either data or metadata, for key %s setting to 'time'" % entry["metadata"]["key"])
tsdb.save(entry)
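
# Editor's note (hedged): this module is meant to be run once as a one-off
# migration against the configured timeseries DB, e.g. from a server shell:
#   import emission.storage.timeseries.format_hacks.move_filter_field as mff
#   mff.move_all_filters_to_data()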
|
OSSESAC/odoopubarquiluz
|
addons/document_page/wizard/document_page_create_menu.py
|
Python
|
agpl-3.0
| 3,491
| 0.002292
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
class document_page_create_menu(osv.osv_memory):
""" Create Menu """
_name = "document.page.create.menu"
_description = "Wizard Create Menu"
_columns = {
'menu_name': fields.char('Menu Name', size=256, required=True),
'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(document_page_create_menu,self).default_get(cr, uid, fields, context=context)
page_id = context.get('active_id')
obj_page = self.pool.get('document.page')
page = obj_page.browse(cr, uid, page_id, context=context)
res['menu_name'] = page.name
return res
def document_page_menu_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
obj_page = self.pool.get('document.page')
obj_view = self.pool.get('ir.ui.view')
obj_menu = self.pool.get('ir.ui.menu')
obj_action = self.pool.get('ir.actions.act_window')
page_id = context.get('active_id', False)
page = obj_page.browse(cr, uid, page_id, context=context)
datas = self.browse(cr, uid, ids, context=context)
data = False
if datas:
data = datas[0]
if not data:
return {}
value = {
'name': 'Document Page',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'document.page',
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'inlineview',
}
value['domain'] = "[('parent_id','=',%d)]" % (page.id)
value['res_id'] = page.id
action_id = obj_action.create(cr, SUPERUSER_ID, value)
|
# only the super user is allowed to create menu due to security rules on ir.values
menu_id = obj_menu.create(cr, SUPERUSER_ID, {
'name': data.menu_name,
'parent_id':data.menu_parent_id.id,
'icon': 'STOCK_DIALOG_QUESTION',
            'action': 'ir.actions.act_window,' + str(action_id),
}, context)
obj_page.write(cr, uid, [page_id], {'menu_id':menu_id})
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pdevetto/super-duper-disco
|
movies/models.py
|
Python
|
gpl-3.0
| 1,963
| 0.027509
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
DIRECTOR = 0
ACTOR = 1
PRODUCER = 2
SCREENPLAY = 3
PHOTOGRAPHY = 4
WRITER = 5
PEOPLE_ROLE = (
(DIRECTOR, 'Director'),
(ACTOR, 'Actor'),
(PRODUCER, 'Producer'),
(SCREENPLAY, 'Screenplay'),
(PHOTOGRAPHY, 'Director of Photography'),
(WRITER, 'Writer')
)
@python_2_unicode_compatible
class People(models.Model):
name = models.CharField(max_length=200)
tmdb_id = models.IntegerField(blank=True)
profile = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Genre(models.Model):
name = models.CharField(max_length=200)
|
tmdb_id = models.IntegerField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Movie(models.Model):
title = models.CharField(max_length=200, blank=True)
    titlehash = models.CharField(max_length=200)
filename = models.CharField(max_length=200, blank=True)
filepath = models.CharField(max_length=255, blank=True)
poster = models.CharField(max_length=200, null=True)
year = models.IntegerField(null=True)
tmdb_id = models.IntegerField(null=True)
clean = models.IntegerField(null=True, default=0)
possible = models.TextField(null=True, default=0)
search = models.TextField(null=True, default=0)
genres = models.ManyToManyField(Genre)
def __str__(self):
return self.title
def custom_meth(self):
return "Voila"
@python_2_unicode_compatible
class Role(models.Model):
role = models.IntegerField(default=0, choices=PEOPLE_ROLE)
people = models.ForeignKey(People, on_delete=models.CASCADE)
movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
tmdb_id = models.CharField(max_length=200)
def __str__(self):
return "ROLE" + self.role
|
philippbosch/django-geoposition
|
geoposition/apps.py
|
Python
|
mit
| 129
| 0.007752
|
from django.apps import AppConfig
class GeoPositionConfig(AppConfig):
name = 'geoposition'
    verbose_name = "GeoPosition"
|
robmcmullen/atrcopy
|
atrcopy/dcm.py
|
Python
|
gpl-2.0
| 1,753
| 0.001711
|
import numpy as np
from . import errors
from .container import DiskImageContainer
from .segments import SegmentData
class DCMContainer(DiskImageContainer):
valid_densities = {
0: (720, 128),
1: (720, 256),
2: (1040, 128),
}
def get_next(self):
try:
data = self.raw[self.index]
except IndexError:
raise errors.InvalidContainer("Incomplete DCM file")
else:
self.index += 1
return data
def unpack_bytes(self, data):
self.index = 0
self.count = len(data)
self.raw = data
archive_type = self.get_next()
        if archive_type == 0xf9 or archive_type == 0xfa:
archive_flags = self.get_next()
if archive_flags & 0x1f != 1:
if archive_type == 0xf9:
raise errors.InvalidContainer("DCM m
|
ulti-file archive combined in the wrong order")
else:
raise errors.InvalidContainer("Expected pass one of DCM archive first")
density_flag = (archive_flags >> 5) & 3
if density_flag not in self.valid_densities:
raise errors.InvalidContainer(f"Unsupported density flag {density_flag} in DCM")
else:
raise errors.InvalidContainer("Not a DCM file")
# DCM decoding goes here. Currently, instead of decoding it raises the
# UnsupportedContainer exception, which signals to the caller that the
# container has been successfully identified but can't be parsed.
#
# When decoding is supported, return the decoded byte array instead of
# this exception.
raise errors.UnsupportedContainer("DCM archives are not yet supported")
|
mbiciunas/nix
|
test/cli_config/tag/test_tag_show.py
|
Python
|
gpl-3.0
| 1,291
| 0.001549
|
import pytest
from cli_config.tag import tag
from utility.nix_error import NixError
def test_tag_show_no_tag(capsys):
with pytest.raises(SystemExit) as _excinfo:
tag.tag("nixconfig", ["show"])
_out, _err = capsys.readouterr()
assert "2" in str(
|
_excinfo.value), "Exception doesn't contain expected string"
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
assert "the following arguments are required: tag" in _err, "StdErr doesn't contain expected string"
def test_tag_show_invalid_tag(capsys):
with pytest.raises(NixError) as _excinfo:
|
tag.tag("nixconfig", ["show", "badtag"])
_out, _err = capsys.readouterr()
assert "Unknown tag: badtag" in str(_excinfo.value)
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_tag_show_good_tag(capsys):
tag.tag("nixconfig", ["show", "tag1"])
_out, _err = capsys.readouterr()
assert "script1" in _out, "'script1' should be in output"
assert "script2" in _out, "'script2' should be in output"
assert "script3" not in _out, "'script2' should be in output"
assert len(_err) is 0, "StdErr should be empty, contains: {}".format(_err)
|
mapledyne/skytap
|
skytap/models/Interface.py
|
Python
|
mit
| 1,497
| 0
|
"""Support for an interface resource in Skytap."""
import json
from skytap.framework.ApiClient import ApiClient # noqa
from skytap.models.PublishedServices import PublishedServices # noqa
from skytap.models.SkytapResource import SkytapResource # noqa
class Interface(SkytapResource):
"""One Skytap (network) Interface."""
def __getattr__(self, key):
"""Get attributes.
Interfaces aren't fully returned when the API call is made -
Published Services aren't returned. Often this doesn't matter,
so we don't automatically pull this information. However, if you ask
for the services, this function will go and get the requested
        information on demand. This allows saving of API calls (we don't
request this unless you're accessing Published Services), but also
you can treat the object as if the services are there all along. We'll
get the info when you ask for it, and you can move along like it was
there from the start.
If you're doing anything other than asking for services, then this
passes the call upstream to do the default stuff.
"""
        if key == 'services':
api = ApiClient()
services_json = json.loads(api.rest(self.url))
self.services = PublishedServices(services_json["services"],
self.url)
return self.services
return super(Interface, self).__getattr__(key)
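# Illustrative use (the constructor argument and payload are assumptions, not
# the documented API): ordinary attributes come straight from the JSON the
# object was built from, while the first access to `.services` triggers the
# extra API call described in the docstring above and caches the result.
#   nic = Interface(interface_json)
#   nic.services   # fetched lazily on first access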
|
UnrememberMe/pants
|
contrib/codeanalysis/src/python/pants/contrib/codeanalysis/tasks/index_java.py
|
Python
|
apache-2.0
| 3,430
| 0.010496
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.codeanalysis.tasks.indexable_java_targets import IndexableJavaTargets
class IndexJava(NailgunTask):
cache_target_dirs = True
_KYTHE_JAVA_INDEXER_MAIN = 'com.google.devtools.kythe.analyzers.java.JavaIndexer'
@classmethod
def subsystem_dependencies(cls):
return super(IndexJava, cls).subsystem_dependencies() + (IndexableJavaTargets,)
@classmethod
def implementation_version(cls):
# Bump this version to invalidate all past artifacts generated by this task.
return super(IndexJava, cls).implementation_version() + [('IndexJava', 8), ]
@classmethod
def product_types(cls):
return ['kythe_entries_files']
@classmethod
def prepare(cls, options, round_manager):
super(IndexJava, cls).prepare(options, round_manager)
round_manager.require_data('kindex_files')
@classmethod
def register_options(cls, register):
super(IndexJava, cls).register_options(register)
cls.register_jvm_tool(register,
'kythe-java-indexer',
main=cls._KYTHE_JAVA_INDEXER_MAIN)
@staticmethod
def _entries_file(vt):
return os.path.join(vt.results_dir, 'index.entries')
def execute(self):
indexable_targets = IndexableJavaTargets.global_instance().get(self.context)
with self.invalidated(indexable_targets, invalidate_dependents=True) as invalidation_check:
if invalidation_check.invalid_vts:
indexer_cp = self.tool_classpath('kythe-java-indexer')
# Kythe jars embed a copy of Java 9's com.sun.tools.javac and javax.tools, for use on JDK8.
# We must put these jars on the bootclasspath, ahead of any others, to ensure that we load
# the Java 9 versions, and not the runtime's versions.
        jvm_options = ['-Xbootclasspath/p:{}'.format(':'.join(indexer_cp))]
jvm_options.extend(self.get_options().jvm_options)
for vt in invalidation_check.invalid_vts:
self._index(vt, indexer_cp, jvm_options)
for vt in invalidation_check.all_vts:
|
entries = self._entries_file(vt)
self.context.products.get_data('kythe_entries_files', dict)[vt.target] = entries
def _index(self, vt, indexer_cp, jvm_options):
self.context.log.info('Kythe indexing {}'.format(vt.target.address.spec))
kindex_file = self.context.products.get_data('kindex_files').get(vt.target)
if not kindex_file:
raise TaskError('No .kindex file found for {}'.format(vt.target.address.spec))
args = [kindex_file, '--emit_jvm', 'semantic', '--out', self._entries_file(vt)]
result = self.runjava(classpath=indexer_cp, main=self._KYTHE_JAVA_INDEXER_MAIN,
jvm_options=jvm_options,
args=args, workunit_name='kythe-index',
workunit_labels=[WorkUnitLabel.COMPILER])
if result != 0:
raise TaskError('java {main} ... exited non-zero ({result})'.format(
        main=self._KYTHE_JAVA_INDEXER_MAIN, result=result))
|
AC130USpectre/Python-programs
|
ModuleOperators.py
|
Python
|
gpl-3.0
| 1,078
| 0.004638
|
#some useful operations in module arithmetic
def power2(base, mod):
return (base ** 2) % mod
def prod(num1, num2, mod):
return (num1 * num2) % mod
def power(base, exp, mod):
k = 0
while exp >> k != 0:
        k += 1
k -= 1
result = base
for i in range(k - 1, -1, -1):
if 1 << i & exp == 0:
result = power2(result, mod)
else:
result = prod(power2(result, mod), base, mod)
return result
def euclid(a, b):
if a % b == 0:
return b
else:
return euclid(b, a % b)
def extEuclid(a, b):
R_last = a
R_now = b
R_fut = R_last % R_now
Q_last = 1
Q_now = 0
P_last = 0
P_now = 1
while R_fut > 0:
G = R_last // R_now
R_fut = R_last % R_now
P_fut = P_last - P_now * G
Q_fut = Q_last - Q_now * G
R_last = R_now
R_now = R_fut
P_last = P_now
P_now = P_fut
Q_last = Q_now
Q_now = Q_fut
return (Q_last, P_last, a * Q_last + b * P_last)
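# Quick sanity checks (illustrative values, my addition): the binary
# exponentiation above should agree with Python's built-in three-argument pow,
# and extEuclid should return Bezout coefficients that combine to the gcd.
if __name__ == "__main__":
    assert power(7, 128, 13) == pow(7, 128, 13)
    q, p, g = extEuclid(240, 46)
    assert g == euclid(240, 46) == 240 * q + 46 * p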
|
Glottotopia/aagd
|
moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/script/cli/show.py
|
Python
|
mit
| 723
| 0.002766
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - cli show script
@copyright: 2006 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.script import MoinScript
from MoinMoin.wsgiapp import run
class PluginScript(MoinScript):
"""\
Purpose:
========
Just run a CLI request and show the output.
Detailed Instructions:
======================
General syntax: moin [options] cli show
[options] usually should be:
|
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
"""
def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)
def mainloop(self):
self.init_request()
run(self.request)
|
bdaroz/the-blue-alliance
|
tests/test_key_name_validators.py
|
Python
|
mit
| 2,185
| 0.003204
|
import unittest2
from models.event import Event
from models.match import Match
from models.team import Team
class TestKeyNameValidators(unittest2.TestCase):
def setUp(self):
self.valid_team_key = "frc177"
self.valid_team_key2 = "frc1"
self.invalid_team_key = "bcr077"
self.invalid_team_key2 = "frc 011"
self.invalid_team_key3 = "frc711\\"
self.valid_event_key = "2010ct"
self.valid_event_key2 = "2014onto2"
self.invalid_event_key = "210c1"
self.invalid_event_key2 = "frc2010ct"
self.invalid_event_key3 = "2010 ct"
self.valid_match_key = "2010ct_sf1m2"
self.invalid_match_key = "0010c1_0m2"
self.invalid_match_key2 = "2010c1_1f1m1"
self.invalid_match_key3 = "2010c1_ef10m1"
def test_valid_team_key(self):
self.assertEqual(Team.validate_key_name(self.valid_team_key), True)
        self.assertEqual(Team.validate_key_name(self.valid_team_key2), True)
def test_invalid_team_key(self):
self.assertEqual(Team.validate_key_name(self.invalid_team_key), False)
self.assertEqual(Team.validate_key_name(self.invalid_team_key2), False)
self.assertEqual(Team.validate_key_name(self.invalid_team_key3), False)
def test_valid_event_key(self):
self.assertEqual(Event.validate_key_name(self.valid_event_key), True)
|
self.assertEqual(Event.validate_key_name(self.valid_event_key2), True)
def test_invalid_event_key(self):
self.assertEqual(Event.validate_key_name(self.invalid_event_key), False)
self.assertEqual(Event.validate_key_name(self.invalid_event_key2), False)
self.assertEqual(Event.validate_key_name(self.invalid_event_key3), False)
def test_valid_match_key(self):
self.assertEqual(Match.validate_key_name(self.valid_match_key), True)
def test_invalid_match_key(self):
self.assertEqual(Match.validate_key_name(self.invalid_match_key), False)
self.assertEqual(Match.validate_key_name(self.invalid_match_key2), False)
self.assertEqual(Match.validate_key_name(self.invalid_match_key3), False)
if __name__ == '__main__':
    unittest2.main()
|
UCHIC/WaterMonitor
|
Current_Designs/Computational_Datalogger/Software/template.py
|
Python
|
bsd-3-clause
| 1,582
| 0.006953
|
import Logger
import os
# The following five lines of code MUST ABSOLUTELY appear in this order. DO NOT MOVE OR CHANGE THE FOLLOWING FOUR LINES OF CODE.
# Logger.initPins() Should never be called by the user. It should only be called when this script is automatically run.
Logger.init() # Initialzie the Logger Python module.
Logger.initPins() # Sets pins to initial state. This function should only be called once, when called automatically when powered on.
Logger.setRomBusy() # Tell the AVR datalogger that the EEPROM chip is in use
Logger.setPowerGood() # Tell the AVR datalogger that the Raspberry Pi is powered on
dataTuple = Logger.loadData() # Read the data from the EEPROM chip
Logger.setRomFree() # Tell the AVR datalogger that the EEPROM chip is no longer in use.
# Process the contents of dataTuple here. The format is as follows:
# Index | dataTuple
# ---------------------------------------------------------
# 0 Number of Records
# 1 Year logging started
# 2 Month logging started
#           3                    Day logging started
# 4 Hour logging started
# 5 Minute logging started
# 6 Second logging started
# 7 Data Byte
# 8 Data Byte
# 9 Data Byte
# 10 Data Byte
# ... ...
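# Illustrative unpacking of the header laid out above (variable names are my
# own; the indices follow the table):
#   num_records = dataTuple[0]
#   start_stamp = dataTuple[1:7]   # (year, month, day, hour, minute, second)
#   data_bytes  = dataTuple[7:]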
if (dataTuple[0] == Logger.bufferMax()): # This means that the Pi was turned on by the Datalogger, not a user, so it should turn itself off.
Logger.setPowerOff() # Tell the AVR datalogger that the Raspberry Pi is shutting down.
os.system("sudo poweroff") # Shut down the Raspberry Pi
|
samuelgarcia/python-neo
|
neo/io/plexonio.py
|
Python
|
bsd-3-clause
| 594
| 0
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.plexonrawio import PlexonRawIO
class PlexonIO(PlexonRawIO, BaseFromRaw):
"""
Class for reading the old data format from Plexon
acquisition system (.plx)
Note that Plexon now use a new format PL2 which is NOT
supported by this IO.
Compatible with versions 100 to 106.
Other versions have not been tested.
"""
_prefered_signal_group_mode = 'group-by-same-units'
def __init__(self, filename):
|
PlexonRawIO.__init__(self, filename=filename)
BaseFromRaw.__init__(self, filename)
|
Rignak/Scripts-Python
|
Servlet/readme_maker.py
|
Python
|
gpl-3.0
| 1,062
| 0.000942
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 21:30:31 2019
@author: Rignak
"""
import os
from os.path import join, split
lines = []
for root, folders, filenames in os.walk('..'):
for filename in filenames:
if filename == 'readme.md':
lines += ['']
with open(join(root, filename), 'r', encoding='utf-8') as file:
spoiler = False
lines.append(f"\n# {split(root)[-1]}")
for line in file.readlines():
if line.startswith('#') and not spoiler:
lines.append(f"\n<details>\n<summary> {split(root)[-1]} </summary>\n\n")
spoiler = True
if line.startswith('!'):
                    path = line.split('(')[-1][:-1]
line = line.replace(path, root[3:] + '/' + path)
lines.append(line)
lines.append("\n</details>\n")
with open(join('..', 'README.md'), 'w', encoding='utf-8') as file:
for line in lines:
file.write(line)
|
karlalopez/hackbright
|
objects-tom/solution/objects4.py
|
Python
|
apache-2.0
| 344
| 0.008721
|
class Student(object):
|
"""For student records"""
def __init__(self, name=None):
# This special method is called a "constructor"
self.name = name
def print_name(self):
print self.name
jenny = Student('Jenny')
jenny.print_name() # prints 'Jenny'
### Exercise Time ###
bill = Student()
bill.print_name()
|
lino-framework/lino
|
lino/core/ddh.py
|
Python
|
bsd-2-clause
| 3,228
| 0.00031
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Defines the :class:`DisableDeleteHandler` class.
See :doc:`/dev/delete`.
"""
# import logging ; logger = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from .utils import full_model_name as fmn
class DisableDeleteHandler(object):
"""A helper object used to find out whether a known object can be
deleted or not.
Lino installs an instance of this on each model in an attribute
`_lino_ddh` during kernel startup.
.. attribute:: fklist
A list of tuples `(model, fk)`, one item for each FK field in
the application which points to this model.
.. attribute:: model
The owning model (i.e. ``m._lino_ddh.model is m`` is True for
every model)
"""
def __init__(self, model):
self.model = model
self.fklist = []
def add_fk(self, model, fk):
        # called from kernel during startup. fk_model is None for
# fields defined on a parent model.
for m, fld in self.fklist:
if model is m and fld.name == fk.name:
# avoid duplicate entries caused by MTI children
return
self.fklist.append((model, fk))
def f(a):
return fmn(a[0]) + '.' + a[1].name
self.fklist.sort(key=f)
def __str__(self):
        s = ','.join([m.__name__ + '.' + fk.name for m, fk in self.fklist])
return "<DisableDeleteHandler(%s, %s)>" % (self.model, s)
def disable_delete_on_object(self, obj, ignore_models=set()):
"""Return a veto message which explains why this object cannot be
deleted. Return `None` if there is no veto.
If `ignore_model` (a set of model class objects) is specified,
do not check for vetos on ForeignKey fields defined on one of
these models.
"""
#logger.info("20101104 called %s.disable_delete(%s)", obj, self)
# print "20150831 disable_delete", obj, self
for m, fk in self.fklist:
if m in ignore_models:
# print "20150831 skipping", m, fk
continue
# if m.__name__.endswith("Partner") and fk.name == 'partner':
# print 20150831, m, fk
if fk.name in m.allow_cascaded_delete:
continue
if fk.null and fk.remote_field.on_delete == models.SET_NULL:
continue
n = m.objects.filter(**{fk.name: obj}).count()
if n:
return obj.delete_veto_message(m, n)
kernel = settings.SITE.kernel
# print "20141208 generic related objects for %s:" % obj
for gfk, fk_field, qs in kernel.get_generic_related(obj):
if gfk.name in qs.model.allow_cascaded_delete:
continue
if fk_field.null: # a nullable GFK is no reason to veto
continue
n = qs.count()
# print "20141208 - %s %s %s" % (
# gfk.model, gfk.name, qs.query)
if n:
return obj.delete_veto_message(qs.model, n)
return None
|
monumentum/mongoenginerics
|
mongoenginerics/adapter/apistar.py
|
Python
|
mit
| 1,346
| 0
|
import importlib
import json
from .base import MongoEnginericsAdapter
class ApistarWSGIAdapter(MongoEnginericsAdapter):
    def __init__(self, *args, **kwargs):
self.engine = importlib.import_module('apistar')
self._wsgi = importlib.import_module('apistar.frameworks.wsgi')
super(ApistarWSGIAdapter, self).__init__(*args, **kwargs)
def attach(self, ctrl):
def find(query: self.engine.http.QueryParams):
return ctrl.find(query)
        def update(item_id, updates: self.engine.http.Body):
return ctrl.update(item_id, json.loads(updates))
def create(body: self.engine.http.Body):
return ctrl.create(json.loads(body))
def find_one(item_id):
return ctrl.find_one(item_id)
def delete(item_id):
return ctrl.delete(item_id)
return self.engine.Include('/{}'.format(ctrl.name), [
self.engine.Route('/', 'GET', find),
self.engine.Route('/', 'POST', create),
self.engine.Route('/{item_id}', 'GET', find_one),
self.engine.Route('/{item_id}', 'PUT', update),
self.engine.Route('/{item_id}', 'DELETE', delete),
])
def get_app(self):
routes = [self.attach(ctrl()) for ctrl in self._controllers]
return self._wsgi.WSGIApp(routes=routes)
|
Jakeable/Ralybot
|
plugins/steamdb.py
|
Python
|
gpl-3.0
| 3,386
| 0.003839
|
import re
import requests
import bs4
from ralybot import hook
from ralybot.util import web
# different forks of cloudflare-scrape have different package layouts
try:
    from cfscrape import cfscrape
except ImportError:
    try:
        import cfscrape
    except ImportError:
        cfscrape = None
class SteamError(Exception):
pass
def percentage(part, whole):
return 100 * float(part) / float(whole)
CALC_URL = "https://steamdb.info/calculator/"
PLAYED_RE = re.compile(r"(.*)\((.*)%\)")
def get_data(user, currency="us"):
"""
Takes a user's Steam Community ID and returns a dict containing info about the games the user owns.
:type user: str
:type currency: str
:return: dict
"""
data = {}
# form the request
params = {'player': user, 'currency': currency}
# get the page
try:
if cfscrape:
scraper = cfscrape.create_scraper()
request = scraper.get(CALC_URL, params=params)
else:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Referer': 'https://steamdb.info/'
}
request = requests.get(CALC_URL, params=params, headers=headers)
request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
raise SteamError("Could not get user info: {}".format(e))
# parse that page!
soup = bs4.BeautifulSoup(request.content)
# get all the data we need
try:
data["name"] = soup.find("h1", {"class": "header-title"}).find("a").text
data["url"] = request.url
data["status"] = soup.find('td', text='Status').find_next('td').text
data["value"] = soup.find("h1", {"class": "calculator-price"}).text
        data["value_sales"] = soup.find("h1", {"class": "calculator-price-lowest"}).text
data["count"] = int(soup.find("div",
{"class": "pull-right price-container"}).find("p").find("span", {"class":
"number"}).text)
played = soup.find('td', text='Games not played').find_next('td').text
played = PLAYED_RE.search(played).groups()
data["count_unplayed"] = int(played[0])
data["count_played"] = data["count"] - data["count_unplayed"]
data["percent_unplayed"] = round(percentage(data["count_unplayed"], data["count"]), 1)
data["percent_played"] = round(percentage(data["count_played"], data["count"]), 1)
except AttributeError:
raise SteamError("Could not read info, does this user exist?")
return data
@hook.command
def steamcalc(text):
"""steamcalc <username> - Gets value of steam account. Uses steamcommunity.com/id/<nickname>."""
user = text.strip().lower()
try:
data = get_data(user)
except SteamError as e:
return "{}".format(e)
data["short_url"] = web.try_shorten(data["url"])
return "\x02{name}\x02 has \x02{count}\x02 games with a total value of \x02{value}\x02" \
" (\x02{value_sales}\x02 during sales). \x02{count_unplayed}\x02 games" \
" (\x02{percent_unplayed}%\x02) have never been played - {short_url}".format(**data)
|
Lenchik13/Testing
|
fixture/application.py
|
Python
|
apache-2.0
| 1,000
| 0.001
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
            self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognezed browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
            self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
if not (wd.current_url.endswith("/addressbook/")):
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
|
Shuailong/Leetcode
|
solutions/nim-game.py
|
Python
|
mit
| 920
| 0.007609
|
#!/usr/bin/env python
# encoding: utf-8
"""
nim-game.py
Created by Shuailong on 2015-12-21.
https://leetcode.com/problems/nim-game/.
"""
class Solution1(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
'''Too time consuming'''
win1 = True
win2 = True
win3 = True
win = True
i = 4
while i < n+1:
            win = not win1 or not win2 or not win3
win1 = win2
win2 = win3
            win3 = win
i += 1
return win
class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
'''Find the law and rewrite'''
return n & 3 != 0
# return n % 4 != 0
def main():
solution = Solution()
n = 4
print solution.canWinNim(n)
if __name__ == '__main__':
main()
|
gojira/tensorflow
|
tensorflow/contrib/tpu/profiler/pip_package/setup.py
|
Python
|
apache-2.0
| 2,551
| 0
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
_VERSION = '1.7.0'
CONSOLE_SCRIPTS = [
'capture_tpu_profile=cloud_tpu_profiler.main:run_main',
]
setup(
name='cloud_tpu_profiler',
version=_VERSION.replace('-', ''),
description='Trace and profile Cloud TPU performance',
long_description='Tools for capture TPU profile',
url='https://www.tensorflow.org/tfrc/',
    author='Google Inc.',
author_email='opensource@google.com',
packages=['cloud_tpu_profiler'],
package_data={
'cloud_tpu_profiler': ['data/*'],
},
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
classifiers=[
        # How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow performance tpu',
)
|
ds-hwang/deeplearning_udacity
|
python_practice/quiz1.py
|
Python
|
mit
| 409
| 0.007335
|
"""Softmax."""
scores = [3.0, 1.0, 0.2]
import numpy as np
def softmax(x):
"
|
""Compute softmax values for each sets of scores in x."""
return np.exp(x) / sum(np.exp(x))
print(softmax(scores))
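# A numerically safer variant (my addition, not part of the original quiz):
# subtracting the column-wise max before exponentiating avoids overflow for
# large scores and leaves the result unchanged.
def softmax_stable(x):
    x = np.asarray(x)
    z = np.exp(x - x.max(axis=0))
    return z / z.sum(axis=0)
print(softmax_stable(scores))  # matches softmax(scores) above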
# Plot softmax curves
import matplotlib.pyplot as plt
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()
|
jerrrytan/bitcamp
|
bitcamp/manage.py
|
Python
|
mit
| 250
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bitcamp.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
StefanRijnhart/stock-logistics-warehouse
|
stock_reserve/model/stock_reserve.py
|
Python
|
agpl-3.0
| 6,546
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import except_orm
from openerp.tools.translate import _
class StockReservation(models.Model):
""" Allow to reserve products.
The fields mandatory for the creation of a reservation are:
* product_id
* product_uom_qty
* product_uom
* name
The following fields are required but have default values that you may
want to override:
* company_id
* location_id
* dest_location_id
Optionally, you may be interested to define:
* date_validity (once passed, the reservation will be released)
* note
"""
_name = 'stock.reservation'
_description = 'Stock Reservation'
_inherits = {'stock.move': 'move_id'}
move_id = fields.Many2one(
'stock.move',
'Reservation Move',
required=True,
readonly=True,
ondelete='cascade',
select=1)
date_validity = fields.Date('Validity Date')
@api.model
def default_get(self, fields_list):
"""
Ensure default value of computed field `product_qty` is not set
as it would raise an error
"""
res = super(StockReservation, self).default_get(fields_list)
if 'product_qty' in res:
del res['product_qty']
return res
@api.model
def get_location_from_ref(self, ref):
""" Get a location from a xmlid if allowed
:param ref: tuple (module, xmlid)
"""
data_obj = self.env['ir.model.data']
try:
location = data_obj.xmlid_to_object(ref, raise_if_not_found=True)
location.check_access_rule('read')
location_id = location.id
except (except_orm, ValueError):
location_id = False
return location_id
@api.model
def _default_picking_type_id(self):
""" Search for an internal picking type
"""
type_obj = self.env['stock.picking.type']
types = type_obj.search([('code', '=', 'internal')], limit=1)
if types:
return types[0].id
return False
@api.model
def _default_location_id(self):
move_obj = self.env['stock.move']
picking_type_id = self._default_picking_type_id()
return (move_obj
.with_context(default_picking_type_id=picking_type_id)
._default_location_source())
@api.model
def _default_location_dest_id(self):
ref = 'stock_reserve.stock_location_reservation'
return self.get_location_from_ref(ref)
_defaults = {
'picking_type_id': _default_picking_type_id,
'location_id': _default_location_id,
'location_dest_id': _default_location_dest_id,
'product_uom_qty': 1.0,
}
@api.multi
def reserve(self):
""" Confirm a reservation
The reservation is done using the default UOM of the product.
A date until which the product is reserved can be specified.
"""
self.date_expected = fields.Datetime.now()
self.move_id.action_confirm()
self.move_id.picking_id.action_assign()
return True
@api.multi
def release(self):
"""
Release moves from reservation
"""
self.mapped('move_id').action_cancel()
return True
@api.model
def release_validity_exceeded(self, ids=None):
""" Release all the reservation having an exceeded validity date "
|
""
domain = [('date_validity', '<', fields.date.today()),
('state', '=', 'assigned')]
if ids:
domain.append(('id', 'in', ids))
reserv_ids = self.search(domain)
reserv_ids.release()
return True
@api.multi
def unlink(self):
""" Release the reservation before the unlink """
self.release()
return super(StockReservation, self).unlink()
@api.onchange('product_id')
    def _onchange_product_id(self):
""" set product_uom and name from product onchange """
# save value before reading of self.move_id as this last one erase
# product_id value
product = self.product_id
# WARNING this gettattr erase self.product_id
move = self.move_id
result = move.onchange_product_id(
prod_id=product.id, loc_id=False, loc_dest_id=False,
partner_id=False)
if result.get('value'):
vals = result['value']
# only keep the existing fields on the view
self.name = vals.get('name')
self.product_uom = vals.get('product_uom')
# repeat assignation of product_id so we don't loose it
self.product_id = product.id
@api.onchange('product_uom_qty')
def _onchange_quantity(self):
""" On change of product quantity avoid negative quantities """
if not self.product_id or self.product_uom_qty <= 0.0:
self.product_uom_qty = 0.0
@api.multi
def open_move(self):
assert len(self.ids) == 1, "1 ID expected, got %r" % self.ids
reserv = self.move_id
IrModelData = self.env['ir.model.data']
ref_form2 = 'stock.action_move_form2'
action = IrModelData.xmlid_to_object(ref_form2)
action_dict = action.read()[0]
action_dict['name'] = _('Reservation Move')
# open directly in the form view
ref_form = 'stock.view_move_form'
view_id = IrModelData.xmlid_to_res_id(ref_form)
action_dict.update(
views=[(view_id, 'form')],
res_id=reserv.id,
)
return action_dict
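# Illustrative creation of a reservation (field values are assumptions; kept as
# a comment because it needs a running Odoo environment):
#   reserv = self.env['stock.reservation'].create({
#       'name': 'Reserve for quotation SO001',
#       'product_id': product.id,
#       'product_uom': product.uom_id.id,
#       'product_uom_qty': 2.0,
#   })
#   reserv.reserve()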
|
zhangxujinsh/keras
|
keras/callbacks.py
|
Python
|
mit
| 8,643
| 0.00162
|
from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import time, json, warnings
from collections import deque
from .utils.generic_utils import Progbar
class CallbackList(object):
def __init__(self, callbacks=[], queue_length=10):
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def _set_params(self, params):
for callback in self.callbacks:
callback._set_params(params)
def _set_model(self, model):
for callback in self.callbacks:
callback._set_model(model)
def on_epoch_begin(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs={}):
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs={}):
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs={}):
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs={}):
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs={}):
for callback in self.callbacks:
callback.on_train_end(logs)
class Callback(object):
def __init__(self):
pass
def _set_params(self, params):
self.params = params
def _set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs={}):
pass
def on_epoch_end(self, epoch, logs={}):
pass
def on_batch_begin(self, batch, logs={}):
pass
def on_batch_end(self, batch, logs={}):
pass
def on_train_begin(self, logs={}):
pass
def on_train_end(self, logs={}):
pass
class BaseLogger(Callback):
def on_train_begin(self, logs={}):
        self.verbose = self.params['verbose']
def on_epoch_begin(self, epoch, logs={}):
if self.verbose:
print('Epoch %d' % epoch)
self.progbar = Progbar(target=self.params['nb_sample'],
verbose=self.verbose)
self.seen = 0
self.totals = {}
def on_batch_begin(self, batch, logs={}):
if self.seen < self.params['nb_sample']:
self.log_values = []
|
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# skip progbar update for the last batch; will be handled by on_epoch_end
if self.verbose and self.seen < self.params['nb_sample']:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs={}):
for k in self.params['metrics']:
if k in self.totals:
self.log_values.append((k, self.totals[k] / self.seen))
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values)
class History(Callback):
def on_train_begin(self, logs={}):
self.epoch = []
self.history = {}
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs={}):
self.epoch.append(epoch)
for k, v in self.totals.items():
if k not in self.history:
self.history[k] = []
self.history[k].append(v / self.seen)
for k, v in logs.items():
if k not in self.history:
self.history[k] = []
self.history[k].append(v)
class ModelCheckpoint(Callback):
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False):
super(Callback, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.best = np.Inf
def on_epoch_end(self, epoch, logs={}):
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
else:
if current < self.best:
if self.verbose > 0:
print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
% (epoch, self.monitor, self.best, current, self.filepath))
self.best = current
self.model.save_weights(self.filepath, overwrite=True)
else:
if self.verbose > 0:
print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
else:
if self.verbose > 0:
print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
self.model.save_weights(self.filepath, overwrite=True)
class EarlyStopping(Callback):
def __init__(self, monitor='val_loss', patience=0, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.best = np.Inf
self.wait = 0
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" % (self.monitor), RuntimeWarning)
if current < self.best:
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
if self.verbose > 0:
print("Epoch %05d: early stopping" % (epoch))
self.model.stop_training = True
self.wait += 1
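# Illustrative wiring (model and data names are assumptions): callbacks are
# passed to fit() and driven by the CallbackList machinery defined above.
#   model.fit(X_train, y_train, validation_split=0.1,
#             callbacks=[ModelCheckpoint('weights.hdf5', save_best_only=True),
#                        EarlyStopping(monitor='val_loss', patience=3)])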
class RemoteMonitor(Callback):
def __init__(self, root='http://localhost:9000'):
self.root = root
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_types.py
|
Python
|
mit
| 14,942
| 0.025833
|
# Python test set -- part 6, built-in types
from test_support import *
print '6. Built-in types'
print '6.1 Truth value testing'
if None: raise TestFailed, 'None is true instead of false'
if 0: raise TestFailed, '0 is true instead of false'
if 0L: raise TestFailed, '0L is true instead of false'
if 0.0: raise TestFailed, '0.0 is true instead of false'
if '': raise TestFailed, '\'\' is true instead of false'
if (): raise TestFailed, '() is true instead of false'
if []: raise TestFailed, '[] is true instead of false'
if {}: raise TestFailed, '{} is true instead of false'
if not 1: raise TestFailed, '1 is false instead of true'
if not 1L: raise TestFailed, '1L is false instead of true'
if not 1.0: raise TestFailed, '1.0 is false instead of true'
if not 'x': raise TestFailed, '\'x\' is false instead of true'
if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
if not [1]: raise TestFailed, '[1] is false instead of true'
if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
def f(): pass
class C: pass
import sys
x = C()
if not f: raise TestFailed, 'f is false instead of true'
if not C: raise TestFailed, 'C is false instead of true'
if not sys: raise TestFailed, 'sys is false instead of true'
if not x: raise TestFailed, 'x is false instead of true'
print '6.2 Boolean operations'
if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
if 1 and 1: pass
else: raise TestFailed, '1 and 1 is false instead of false'
if not 1: raise TestFailed, 'not 1 is true instead of false'
print '6.3 Comparisons'
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: raise TestFailed, 'int comparisons failed'
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: raise TestFailed, 'long int comparisons failed'
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: raise TestFailed, 'float comparisons failed'
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: raise TestFailed, 'string comparisons failed'
if 0 in [0] and 0 not in [1]: pass
else: raise TestFailed, 'membership test failed'
if None is None and [] is not []: pass
else: raise TestFailed, 'identity test failed'
print '6.4 Numeric types (mostly conversions)'
if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
raise TestFailed, 'int/long/float value not equal'
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: raise TestFailed, 'int() does not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
raise TestFailed, 'int mul commutativity'
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
if type(prod) is not int:
raise TestFailed, ("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)+1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)-1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
print '6.5.2 Tuples'
if len(()) != 0: raise TestFailed, 'len(())'
if len((1,)) != 1: raise TestFailed, 'len((1,))'
|
if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
else: raise TestFailed, 'in/not in tuple'
print '6.5.3 Lists'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (center)"
print '6.5.3a Additional list operations'
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slic
|
lino-framework/book
|
lino_book/projects/eric/tests/test_notify.py
|
Python
|
bsd-2-clause
| 5,765
| 0.002951
|
# -*- coding: utf-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Runs some tests about the notification framework.
You can run only these tests by issuing::
$ go team
$ python manage.py test tests.test_notify
Or::
$ go noi
$ python setup.py test -s tests.ProjectsTests.test_team
"""
from __future__ import unicode_literals
import datetime
from mock import patch
from django.conf import settings
from django.utils.timezone import make_aware
from lino.api import dd, rt
from lino.utils.djangotest import TestCase
from lino.core import constants
from lino.modlib.users.choicelists import UserTypes
from lino.utils.instantiator import create
from lino.modlib.notify.models import send_pending_emails_often
from lino.modlib.notify.choicelists import MailModes
from lino.core.diff import ChangeWatcher
import contextlib
@contextlib.contextmanager
def capture_stdout():
import sys
from cStringIO import StringIO
oldout = sys.stdout
try:
out = StringIO()
sys.stdout = out
yield out
finally:
sys.stdout = oldout
out = out.getvalue()
class TestCase(TestCase):
"""Miscellaneous tests."""
maxDiff = None
def test_01(self):
self.assertEqual(settings.SETTINGS_MODULE, None)
self.assertEqual(settings.LOGGING, {})
self.assertEqual(settings.SERVER_EMAIL, 'root@localhost')
@patch('lino.api.dd.logger')
def test_comment(self, logger):
"""Test what happens when a comment is posted on a ticket with
watchers.
"""
ContentType = rt.models.contenttypes.ContentType
Comment = rt.models.comments.Comment
Ticket = rt.models.tickets.Ticket
Project = rt.models.tickets.Project
Vote = rt.models.votes.Vote
Message = rt.models.notify.Message
User = settings.SITE.user_model
create(Project, name="Project")
robin = create(
User, username='robin',
first_name="Robin",
user_type=UserTypes.admin)
        aline = create(
User, username='aline',
first_name="Aline",
email="aline@example.com", language='fr')
        obj = create(
Ticket, summary="Save the world, après moi le déluge",
user=robin)
create(Vote, votable=obj, user=aline)
self.assertEqual(Message.objects.count(), 0)
url = "/api/comments/CommentsByRFC"
post_data = dict()
post_data[constants.URL_PARAM_ACTION_NAME] = 'submit_insert'
post_data.update(short_text="I don't agree.")
post_data[constants.URL_PARAM_MASTER_PK] = obj.pk
ct = ContentType.objects.get_for_model(Ticket)
post_data[constants.URL_PARAM_MASTER_TYPE] = ct.id
# post_data[constants.URL_PARAM_REQUESTING_PANEL] = '123'
response = self.client.post(
url, post_data,
REMOTE_USER='robin',
HTTP_ACCEPT_LANGUAGE='en')
result = self.check_json_result(
response, 'rows success message close_window')
self.assertEqual(result['success'], True)
self.assertEqual(
result['message'],
"""Comment "Comment #1" has been created.""")
self.assertEqual(Message.objects.count(), 1)
msg = Message.objects.all()[0]
# self.assertEqual(msg.message_type)
self.assertEqual(msg.seen, None)
self.assertEqual(msg.user, aline)
expected = """Robin a commenté [ticket 1] (Save the world, """\
"""après moi le déluge): I don't agree."""
self.assertEqual(expected, msg.body)
# manually set created timestamp so we can test on it later.
now = datetime.datetime(2016, 12, 22, 19, 45, 55)
if settings.USE_TZ:
now = make_aware(now)
msg.created = now
msg.save()
settings.SERVER_EMAIL = 'root@example.com'
with capture_stdout() as out:
send_pending_emails_often()
out = out.getvalue().strip()
print(out)
expected = """send email
Sender: root@example.com
To: aline@example.com
Subject: [Django] Robin a comment? #1 (? Save the world, apr?s moi le d?luge)
<body>
(22/12/2016 19:45)
Robin a comment? <a href="http://127.0.0.1:8000/api/tickets/Ticket/1" title="Save the world, après moi le déluge">#1</a> (Save the world, apr?s moi le d?luge): I don't agree.
</body>
"""
self.assertEquivalent(expected, out)
self.assertEqual(logger.debug.call_count, 1)
logger.debug.assert_called_with(
'Send out %s summaries for %d users.',
MailModes.often, 1)
# logger.info.assert_called_with(
# 'Notify %s users about %s', 1, 'Change by robin')
Message.objects.all().delete()
self.assertEqual(Message.objects.count(), 0)
ar = rt.login('robin')
cw = ChangeWatcher(obj)
obj.priority = 200
obj.save_watched_instance(ar, cw)
with capture_stdout() as out:
send_pending_emails_often()
out = out.getvalue().strip()
# print(out)
expected = ""
# self.assertEquivalent(expected, out)
# we do not test the output because the datetime changes. But
# we actually just wanted to see if there is no
# UnicodeException. We capture it in order to hide it from
# test runner output.
self.assertEqual(logger.debug.call_count, 2)
logger.debug.assert_called_with(
'Send out %s summaries for %d users.',
MailModes.often, 1)
|
googleapis/python-access-approval
|
google/cloud/accessapproval_v1/services/access_approval/__init__.py
|
Python
|
apache-2.0
| 769
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import AccessApprovalClient
from .async_client import AccessApprovalAsyncClient
__all__ = (
"AccessApprovalClient",
"AccessApprovalAsyncClient",
)
|
CanonicalLtd/subiquity
|
subiquitycore/async_helpers.py
|
Python
|
agpl-3.0
| 2,411
| 0
|
# Copyright 2019 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import concurrent.futures
import logging
log = logging.getLogger("subiquitycore.async_helpers")
def _done(fut):
try:
fut.result()
except asyncio.CancelledError:
pass
def schedule_task(coro, propagate_errors=True):
loop = asyncio.get_event_loop()
if asyncio.iscoroutine(coro):
task = asyncio.Task(coro)
else:
task = coro
if propagate_errors:
task.add_done_callback(_done)
loop.call_soon(asyncio.ensure_future, task)
return task
async def run_in_thread(func, *args):
loop = asyncio.get_event_loop()
try:
return await loop.run_in_executor(None, func, *args)
except concurrent.futures.CancelledError:
raise asyncio.CancelledError
class SingleInstanceTask:
def __init__(self, func, propagate_errors=True):
self.func = func
self.propagate_errors = propagate_errors
self.task = None
async def _start(self, old):
if old is not None:
old.cancel()
try:
await old
except BaseException:
pass
schedule_task(self.task, self.propagate_errors)
async def start(self, *args, **kw):
await self.start_sync(*args, **kw)
return self.task
def start_sync(self, *args, **kw):
old = self.task
coro = self.func(*args, **kw)
if asyncio.iscoroutine(coro):
self.task = asyncio.Task(coro)
else:
self.task = coro
return schedule_task(self._start(old))
async def wait(self):
while True:
try:
return await self.task
except asyncio.CancelledError:
pass
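As a hedged illustration of how these helpers are typically driven (the probe function and coroutine below are invented for the example; only run_in_thread and SingleInstanceTask come from the module above):
# Illustrative sketch only; slow_blocking_probe and refresh are made-up names.
import asyncio
import time
from subiquitycore.async_helpers import SingleInstanceTask, run_in_thread
def slow_blocking_probe():
    time.sleep(1)                 # stands in for blocking I/O
    return "probe result"
async def refresh():
    # Offload the blocking call so the event loop stays responsive.
    return await run_in_thread(slow_blocking_probe)
async def main():
    task = SingleInstanceTask(refresh)
    task.start_sync()             # schedules refresh(); a later start cancels the old run
    print(await task.wait())      # -> "probe result"
asyncio.run(main())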
|
ros2/launch
|
launch_testing/launch_testing/loader.py
|
Python
|
apache-2.0
| 12,234
| 0.002779
|
# Copyright 2019 Apex.AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import itertools
import os
import unittest
import warnings
from .actions import ReadyToTest
# Patch up the warnings module to streamline the warning messages. See
# https://docs.python.org/3/library/warnings.html#warnings.showwarning
def slim_formatwarning(msg, *args, **kwargs):
return 'Warning: ' + str(msg) + os.linesep
warnings.formatwarning = slim_formatwarning
def _normalize_ld(launch_description_fn):
# A launch description fn can return just a launch description, or a tuple of
# (launch_description, test_context). This wrapper function normalizes things
# so we always get a tuple, sometimes with an empty dictionary for the test_context
def normalize(result):
if isinstance(result, tuple):
return result
else:
return result, {}
def wrapper(**kwargs):
fn_args = inspect.getfullargspec(launch_description_fn)
if 'ready_fn' in fn_args.args + fn_args.kwonlyargs:
# This is an old-style launch_description function which expects ready_fn to be passed
# in to the function
# This type of launch description will be deprecated in the future. Warn about it
# here
warnings.warn(
'Passing ready_fn as an argument to generate_test_description will '
'be removed in a future release. Include a launch_testing.actions.ReadyToTest '
'action in the LaunchDescription instead.'
)
return normalize(launch_description_fn(**kwargs))
else:
# This is a new-style launch_description which should contain a ReadyToTest action
ready_fn = kwargs.pop('ready_fn')
result = normalize(launch_description_fn(**kwargs))
# Fish the ReadyToTest action out of the launch description and plumb our
# ready_fn to it
def iterate_ready_to_test_actions(entities):
"""Recursively search LaunchDescription entities for all ReadyToTest actions."""
for entity in entities:
if isinstance(entity, ReadyToTest):
yield entity
yield from iterate_ready_to_test_actions(
entity.describe_sub_entities()
)
for conditional_sub_entity in entity.describe_conditional_sub_entities():
yield from iterate_ready_to_test_actions(
conditional_sub_entity[1]
)
try:
ready_action = next(e for e in iterate_ready_to_test_actions(result[0].entities))
except StopIteration: # No ReadyToTest action found
raise Exception(
'generate_test_description functions without a ready_fn argument must return '
'a LaunchDescription containing a ReadyToTest action'
)
ready_action._add_callback(ready_fn)
return result
return wrapper
class TestRun:
def __init__(self,
name,
test_description_function,
param_args,
pre_shutdown_tests,
post_shutdown_tests):
self.name = name
if not hasattr(test_description_function, '__markers__'):
test_description_function.__markers__ = {}
self._test_description_function = test_description_function
self.normalized_test_description = _normalize_ld(test_description_function)
self.param_args = param_args
self.pre_shutdown_tests = pre_shutdown_tests
self.post_shutdown_tests = post_shutdown_tests
# If we're parametrized, extend the test names so we can tell more easily what
# params they were run with
if self.param_args:
for tc in itertools.chain(
_iterate_tests_in_test_suite(pre_shutdown_tests),
_iterate_tests_in_test_suite(post_shutdown_tests)
):
test_method = getattr(tc, tc._testMethodName)
new_name = tc._testMethodName + self._format_params()
setattr(tc, '_testMethodName', new_name)
setattr(tc, new_name, test_method)
# Disable cleanup of test cases once they are run
for tc in itertools.chain(
_iterate_test_suites(pre_shutdown_tests),
_iterate_test_suites(post_shutdown_tests)
):
tc._removeTestAtIndex = lambda *args, **kwargs: None
@property
def markers(self):
return self._test_description_function.__markers__
def bind(self, tests, injected_attributes={}, injected_args={}):
"""
Bind injected_attributes and injected_args to tests.
Injected Attributes can be accessed from a test a
|
s self.name
Injected Arguments can be accessed as an argument if the test has an argument with a
matching name
"""
# Inject test attributes into the test as self.wha
|
tever. This method of giving
# objects to the test is pretty inferior to injecting them as arguments to the
# test methods - we may deprecate this in favor of everything being an argument
for name, value in injected_attributes.items():
_give_attribute_to_tests(value, name, tests)
# Give objects with matching names as arguments to tests. This doesn't have the
# weird scoping and name collision issues that the above method has. In fact,
# we give proc_info and proc_output to the tests as arguments too, so anything
# you can do with test attributes can also be accomplished with test arguments
_bind_test_args_to_tests(injected_args, tests)
def get_launch_description(self):
"""
Get just the launch description portion of the test_description.
This should only be used for the purposes of introspecting the launch description. The
returned launch description is not meant to be launched
"""
return self.normalized_test_description(ready_fn=lambda: None)[0]
def all_cases(self):
yield from _iterate_tests_in_test_suite(self.pre_shutdown_tests)
yield from _iterate_tests_in_test_suite(self.post_shutdown_tests)
def __str__(self):
return self.name + self._format_params()
def _format_params(self):
if not self.param_args:
return ''
else:
str_args = map(str, self.param_args.values())
return '[{}]'.format(', '.join(str_args))
def LoadTestsFromPythonModule(module, *, name='launch_tests'):
if not hasattr(module.generate_test_description, '__parametrized__'):
normalized_test_description_func = (
lambda: [(module.generate_test_description, {})]
)
else:
normalized_test_description_func = module.generate_test_description
# If our test description is parameterized, we'll load a set of tests for each
# individual launch
return [TestRun(name,
description,
args,
PreShutdownTestLoader().loadTestsFromModule(module),
PostShutdownTestLoader().loadTestsFromModule(module))
for description, args in normalized_test_description_func()]
def PreShutdownTestLoader():
return _make_loader(False)
def PostShutdownTestLoader():
return _make_loader(True)
def _make_loader(load_post_shutdown):
class _loader(unittest.TestLoade
|
nelango/ViralityAnalysis
|
model/lib/nltk/corpus/reader/api.py
|
Python
|
mit
| 17,836
| 0.001682
|
# Natural Language Toolkit: API for Corpus Readers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
API for corpus readers.
"""
from __future__ import unicode_literals
import os
import re
from collections import defaultdict
from nltk import compat
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.corpus.reader.util import *
@compat.python_2_unicode_compatible
class CorpusReader(object):
"""
A base class for "corpus reader" classes, each of which can be
used to read a specific corpus format. Each individual corpus
reader instance is used to read a specific corpus, consisting of
one or more files under a common root directory. Each file is
identified by its ``file identifier``, which is the relative path
to the file from the root directory.
    A separate subclass is defined for each corpus format. These
subclasses define one or more methods that provide 'views' on the
corpus contents, such as ``words()`` (for a list of words) and
``parsed_sents()`` (for a list of parsed sentences). Called with
no arguments, these methods will return the contents of the entire
corpus. For most corpora, these methods define one or more
selection arguments, such as ``fileids`` or ``categories``, which can
be used to select which portion of the corpus should be returned.
"""
def __init__(self, root, fileids, encoding='utf8', tagset=None):
"""
:type root: PathPointer or str
:param root: A path pointer identifying the root directory for
this corpus. If a string is specified, then it will be
converted to a ``PathPointer`` automatically.
:param fileids: A list of the files that make up this corpus.
This list can either be specified explicitly, as a list of
strings; or implicitly, as a regular expression over file
paths. The absolute path for each file will be constructed
by joining the reader's root to each file name.
:param encoding: The default unicode encoding for the files
that make up the corpus. The value of ``encoding`` can be any
of the following:
- A string: ``encoding`` is the encoding name for all files.
- A dictionary: ``encodin
|
g[file_id]`` is the encoding
name for the file whose identifier is ``file_id``. If
``file_id`` is not in ``encoding``, then the file
contents will be processed using non-unicode byte strings.
- A list: ``encoding`` should be a list of ``(regexp, encoding)``
tuples. The encoding for a file whose identifier is ``file_id``
will be the ``encoding`` value for the first tuple whose
``regexp`
|
` matches the ``file_id``. If no tuple's ``regexp``
matches the ``file_id``, the file contents will be processed
using non-unicode byte strings.
- None: the file contents of all files will be
processed using non-unicode byte strings.
:param tagset: The name of the tagset used by this corpus, to be used
for normalizing or converting the POS tags returned by the
tagged_...() methods.
"""
# Convert the root to a path pointer, if necessary.
if isinstance(root, compat.string_types) and not isinstance(root, PathPointer):
m = re.match('(.*\.zip)/?(.*)$|', root)
zipfile, zipentry = m.groups()
if zipfile:
root = ZipFilePathPointer(zipfile, zipentry)
else:
root = FileSystemPathPointer(root)
elif not isinstance(root, PathPointer):
raise TypeError('CorpusReader: expected a string or a PathPointer')
# If `fileids` is a regexp, then expand it.
if isinstance(fileids, compat.string_types):
fileids = find_corpus_fileids(root, fileids)
self._fileids = fileids
"""A list of the relative paths for the fileids that make up
this corpus."""
self._root = root
"""The root directory for this corpus."""
# If encoding was specified as a list of regexps, then convert
# it to a dictionary.
if isinstance(encoding, list):
encoding_dict = {}
for fileid in self._fileids:
for x in encoding:
(regexp, enc) = x
if re.match(regexp, fileid):
encoding_dict[fileid] = enc
break
encoding = encoding_dict
self._encoding = encoding
"""The default unicode encoding for the fileids that make up
this corpus. If ``encoding`` is None, then the file
contents are processed using byte strings."""
self._tagset = tagset
def __repr__(self):
if isinstance(self._root, ZipFilePathPointer):
path = '%s/%s' % (self._root.zipfile.filename, self._root.entry)
else:
path = '%s' % self._root.path
return '<%s in %r>' % (self.__class__.__name__, path)
def ensure_loaded(self):
"""
Load this corpus (if it has not already been loaded). This is
used by LazyCorpusLoader as a simple method that can be used to
make sure a corpus is loaded -- e.g., in case a user wants to
do help(some_corpus).
"""
pass # no need to actually do anything.
def readme(self):
"""
Return the contents of the corpus README file, if it exists.
"""
return self.open("README").read()
def license(self):
"""
Return the contents of the corpus LICENSE file, if it exists.
"""
return self.open("LICENSE").read()
def citation(self):
"""
Return the contents of the corpus citation.bib file, if it exists.
"""
return self.open("citation.bib").read()
def fileids(self):
"""
Return a list of file identifiers for the fileids that make up
this corpus.
"""
return self._fileids
def abspath(self, fileid):
"""
Return the absolute path for the given file.
:type fileid: str
:param fileid: The file identifier for the file whose path
should be returned.
:rtype: PathPointer
"""
return self._root.join(fileid)
def abspaths(self, fileids=None, include_encoding=False,
include_fileid=False):
"""
Return a list of the absolute paths for all fileids in this corpus;
or for the given list of fileids, if specified.
:type fileids: None or str or list
:param fileids: Specifies the set of fileids for which paths should
be returned. Can be None, for all fileids; a list of
file identifiers, for a specified set of fileids; or a single
file identifier, for a single file. Note that the return
value is always a list of paths, even if ``fileids`` is a
single file identifier.
:param include_encoding: If true, then return a list of
``(path_pointer, encoding)`` tuples.
:rtype: list(PathPointer)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, compat.string_types):
fileids = [fileids]
paths = [self._root.join(f) for f in fileids]
if include_encoding and include_fileid:
return list(zip(paths, [self.encoding(f) for f in fileids], fileids))
elif include_fileid:
return list(zip(paths, fileids))
elif include_encoding:
return list(zip(paths, [self.encoding(f) for f in fileids]))
else:
return paths
def open(self, file):
"""
Return an open stream that can be used to read the given file.
If the file's encoding is
|
js850/pele
|
pele/systems/molecularsystem.py
|
Python
|
gpl-3.0
| 861
| 0.012776
|
from pele.systems import BaseSystem
import pele.utils.elements.elements as elem # This is a dictionary of element parameters for atoms
class MolecularSystem(BaseSystem):
"""
Representation for a molecular system, this system stores info about atoms, bonds,
angles and torsions.
It is possible to represent the molecule using a graph. However, this class is used
to quickly and efficiently:
- add/remov
|
e ato
|
ms, bonds, angles and torsions;
- read/write PDB and other common formats;
- interface between different formats of input files for CHARMM, AMBER etc.;
- visualise molecular structures;
- measure distances between structures.
"""
def __init__(self):
atoms = []
bonds = []
class Atom(object):
"""
Representation of an Atom, object. Can have
"""
|
alfa-addon/addon
|
plugin.video.alfa/channels/sexgalaxy.py
|
Python
|
gpl-3.0
| 4,331
| 0.009242
|
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    import urllib.parse as urlparse # Very slow on PY2. Native on PY3
else:
    import urlparse # Use PY2's native module, which is faster
import re
from platformcode import config, logger
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import filtertools
from channels import autoplay
IDIOMAS = {'vo': 'VO'}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['gounlimited']
host = 'http://sexgalaxy.net' #'http://streamingporn.xyz'
# UBIQFILE y KS2C
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Peliculas", action="lista", url=host + "/full-movies/"))
itemlist.append(item.clone(title="Peliculas JAV", action="lista", url=host + "/jav-movies/"))
itemlist.append(item.clone(title="Videos", action="lista", url=host + "/new-releases/"))
itemlist.append(item.clone(title="Canales", action="categorias", url=host + "/videos/"))
itemlist.append(item.clone(title="Categorias", action="categorias", url=host + "/videos/"))
itemlist.append(item.clone(title="Buscar", action="search"))
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "%s/?s=%s&submit=Search" % (host, texto)
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Categorias" in item.title:
data = scrapertools.find_single_match(data, '>Popular Categories<(.*?)>Popular Paysites<')
else:
data = scrapertools.find_single_match(data, '>Popular Paysites<(.*?)</p>')
patron = '<a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.u
|
rl, timeout=3).data
patron = '<article id="post-.*?'
patron += '<a href="([^"]+)" rel="bookmark">([^<]+)<.*?'
patron += '<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
if not "manyvids" in scrapedtitle:
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, contentTitle=scrapedtitle,
fanart=scrapedthumbnail, url=scr
|
apedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
patron = '<a href="([^"]+)" rel="nofollow[^<]+>(?:|<strong> |)(?:Streaming|Download)'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
if not "ubiqfile" in url:
itemlist.append(item.clone(action='play',title="%s", contentTitle=item.title, url=url))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
|
cloudify-cosmo/softlayer-python
|
SoftLayer/testing/fixtures/SoftLayer_Product_Order.py
|
Python
|
mit
| 433
| 0
|
verifyOrd
|
er = {
'orderId': 1234,
'orderDate': '2013-08-01 15:23:45',
'prices': [{
'id': 1,
'laborFee': '2',
'oneTimeFee': '2',
'oneTimeFeeTax': '.1',
'quantity': 1,
'recurringFee': '2',
'recurringFeeTax': '.1',
'hourlyRecurringFee': '2',
'setupFee': '1',
'item': {'id': 1, 'description': 'this is a thing'},
}]}
placeOrder = verifyOr
|
der
|
DistrictDataLabs/yellowbrick
|
yellowbrick/model_selection/__init__.py
|
Python
|
apache-2.0
| 818
| 0.001222
|
# yellowbrick.model_selection
# Visualizers that wrap the model selection libraries of Scikit-Learn
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Mar 30 10:36:12 2018 -0400
#
# ID: __init__.py [c5355ee] benjamin@bengfort.co
|
m $
"""
Visualizers that wrap the model selection libraries of Scikit-Learn
"""
##########################################################################
## Imports
##########################################################################
|
from .learning_curve import LearningCurve, learning_curve
from .validation_curve import ValidationCurve, validation_curve
from .cross_validation import CVScores, cv_scores
# RFECV and Feature Importances moved here as of YB v1.0
from .importances import FeatureImportances, feature_importances
from .rfecv import RFECV, rfecv
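Since this module only re-exports the visualizers, a short usage sketch may help; the estimator, dataset, and parameters below are illustrative assumptions (scikit-learn and yellowbrick installed), not something taken from the source:
# Illustrative sketch following Yellowbrick's usual fit/show pattern.
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
from yellowbrick.model_selection import LearningCurve
X, y = load_digits(return_X_y=True)
viz = LearningCurve(GaussianNB(), scoring="accuracy", cv=5)
viz.fit(X, y)   # computes train/validation scores over increasing training-set sizes
viz.show()      # renders the learning-curve figure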
|
johmathe/keras
|
tests/auto/keras/layers/test_convolutional.py
|
Python
|
mit
| 6,580
| 0.001976
|
import unittest
import numpy as np
from numpy.testing import assert_allclose
import theano
from keras.layers import convolutional
class TestConvolutions(unittest.TestCase):
def test_convolution_1d(self):
nb_samples = 9
nb_steps = 7
input_dim = 10
filter_length = 6
nb_filter = 5
weights_in = [np.ones((nb_filter, input_dim, filter_length, 1)), np.ones(nb_filter)]
input = np.ones((nb_samples, nb_steps, input_dim))
for weight in [None, weights_in]:
for border_mode in ['valid', 'full', 'same']:
for subsample_length in [1, 3]:
if border_mode == 'same' and subsample_length != 1:
continue
for W_regularizer in [None, 'l2']:
for b_regularizer in [None, 'l2']:
for act_regularizer in [None, 'l2']:
layer = convolutional.Convolution1D(
nb_filter, filter_length, weights=weight,
border_mode=border_mode, W_regularizer=W_regularizer,
b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
subsample_length=subsample_length, input_shape=(None, input_dim))
layer.input = theano.shared(value=input)
for train in [True, False]:
out = layer.get_output(train).eval()
assert input.shape[0] == out.shape[0]
if border_mode == 'same' and subsample_length == 1:
assert input.shape[1] == out.shape[1]
config = layer.get_config()
def test_maxpooling_1d(self):
nb_samples = 9
nb_steps = 7
input_dim = 10
input = np.ones((nb_samples, nb_steps, input_dim))
for ignore_border in [True, False]:
for stride in [1, 2]:
layer = convolutional.MaxPooling1D(stride=stride, ignore_border=ignore_border)
layer.input = theano.shared(value=input)
for train in [True, False]:
layer.get_output(train).eval()
config = layer.get_config()
def test_convolution_2d(self):
nb_samples = 8
nb_filter = 9
stack_size = 7
nb_row = 10
nb_col = 6
input_nb_row = 11
input_nb_col = 12
weights_in = [np.ones((nb_filter, stack_size, nb_row, nb_col)), np.ones(nb_filter)]
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
for weight in [None, weights_in]:
for border_mode in ['valid', 'full', 'same']:
for subsample in [(1, 1), (2, 3)]:
if border_mode == 'same' and subsample != (1, 1):
continue
for W_regularizer in [None, 'l2']:
for b_regularizer in [None, 'l2']:
for act_regularizer in [None, 'l2']:
layer = convolutional.Convolution2D(
nb_filter, nb_row, nb_col, weights=weight,
border_mode=border_mode, W_regularizer=W_regularizer,
b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
subsample=subsample, input_shape=(stack_size, None, None))
layer.input = theano.shared(value=input)
for train in [True, False]:
|
out = layer.get_output(train).eval()
if border_mode == 'same' and subsample == (1, 1):
assert out.shape[2:] == input.shape[2:]
config = layer.get_c
|
onfig()
def test_maxpooling_2d(self):
nb_samples = 9
stack_size = 7
input_nb_row = 11
input_nb_col = 12
pool_size = (3, 3)
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
for ignore_border in [True, False]:
for stride in [(1, 1), (2, 2)]:
layer = convolutional.MaxPooling2D(stride=stride, ignore_border=ignore_border, pool_size=pool_size)
layer.input = theano.shared(value=input)
for train in [True, False]:
layer.get_output(train).eval()
config = layer.get_config()
def test_zero_padding_2d(self):
nb_samples = 9
stack_size = 7
input_nb_row = 11
input_nb_col = 12
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
layer = convolutional.ZeroPadding2D(padding=(2, 2))
layer.input = theano.shared(value=input)
for train in [True, False]:
out = layer.get_output(train).eval()
for offset in [0, 1, -1, -2]:
assert_allclose(out[:, :, offset, :], 0.)
assert_allclose(out[:, :, :, offset], 0.)
assert_allclose(out[:, :, 2:-2, 2:-2], 1.)
config = layer.get_config()
def test_upsample_1d(self):
nb_samples = 9
nb_steps = 7
input_dim = 10
input = np.ones((nb_samples, nb_steps, input_dim))
for length in [2, 3, 9]:
layer = convolutional.UpSample1D(length=length)
layer.input = theano.shared(value=input)
for train in [True, False]:
out = layer.get_output(train).eval()
assert out.shape[1] == length*nb_steps
config = layer.get_config()
def test_upsample_2d(self):
nb_samples = 9
stack_size = 7
input_nb_row = 11
input_nb_col = 12
input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
for length_row in [2, 3, 9]:
for length_col in [2, 3, 9]:
layer = convolutional.UpSample2D(size=(length_row, length_col))
layer.input = theano.shared(value=input)
for train in [True, False]:
out = layer.get_output(train).eval()
assert out.shape[2] == length_row*input_nb_row
assert out.shape[3] == length_col*input_nb_col
config = layer.get_config()
if __name__ == '__main__':
unittest.main()
|
drglove/SickRage
|
sickbeard/clients/download_station.py
|
Python
|
gpl-3.0
| 2,700
| 0.007037
|
# Authors:
# Pedro Jose Pereira Vieito <pvieito@gmail.com> (Twitter: @pvieito)
#
# URL: https://github.com/mr-orange/Sick-Beard
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
# Uses the Synology Download Station API: http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf
import sickbeard
from sickbeard.clients.generic import GenericClient
class DownloadStationAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(DownloadStationAPI, self).__init__('DownloadStation', host, username, password)
self.url = self.host + 'webapi/DownloadStation/task.cgi'
def _get_auth(self):
auth_url = self.host + 'webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=' + self.username + '&passwd=' + self.password + '&session=DownloadStation&format=sid'
try:
self.response = self.session.get(auth_url, verify=False)
self.auth = self.response.json()['data']['sid']
except:
|
return None
return self.auth
def _add_torrent_uri(self, result):
data = {'api':'SYNO.D
|
ownloadStation.Task',
'version':'1', 'method':'create',
'session':'DownloadStation',
'_sid':self.auth,
'uri':result.url
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
self._request(method='post', data=data)
return self.response.json()['success']
def _add_torrent_file(self, result):
data = {'api':'SYNO.DownloadStation.Task',
'version':'1',
'method':'create',
'session':'DownloadStation',
'_sid':self.auth
}
if sickbeard.TORRENT_PATH:
data['destination'] = sickbeard.TORRENT_PATH
files = {'file':(result.name + '.torrent', result.content)}
self._request(method='post', data=data, files=files)
return self.response.json()['success']
api = DownloadStationAPI()
|
bitcoinclassic/bitcoinclassic
|
qa/rpc-tests/mempool_limit.py
|
Python
|
mit
| 2,225
| 0.007191
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test mempool limiting together/eviction with the wallet
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
def __init__(self):
self.txouts = gen_return_txouts()
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0"]))
self.is_network_split = False
self.sync_all()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def run_test(self):
txids = []
utxos = create
|
_confirmed_utxos(self.relayfee, self.nodes[0], 90)
#create a mempool tx that will be evicted
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createraw
|
transaction(inputs, outputs)
self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in xrange (4):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
# by now, the tx should be evicted, check confirmation state
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
MempoolLimitTest().main()
|
netvigator/myPyPacks
|
pyPacks/Web/WantLinks.py
|
Python
|
gpl-2.0
| 2,653
| 0.026008
|
#!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# Web functions want links
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/license
|
s/gpl.html
#
# Copyright 2015-2016 Rick Graves
#
def getUniqueLinks( sReadFile, sOutFile ):
#
from File.Get import getListFromFileLines
from File.Write import QuickDumpLines
#
from Web.Address import getHostPathTuple, getDomainOffURL
from Web.Test import isURL
#
lLines = getListFromF
|
ileLines( sReadFile )
#
setLinks= frozenset( filter( isURL, lLines ) )
#
#
lDecorate = [ ( getHostPathTuple( sURL ), sURL ) for sURL in setLinks ]
#
lDecorate = [ ( ( getDomainOffURL( t[0][0] ), t[0][1] ), t[1] ) for t in lDecorate ]
#
lDecorate.sort()
#
lLinks = [ t[1] for t in lDecorate ]
#
QuickDumpLines( lLinks, sOutFile )
if __name__ == "__main__":
#
from os.path import join
from sys import argv
#
from six import print_ as print3
#
from Dir.Get import sTempDir
from File.Test import isFileThere
from Utils.Result import sayTestResult
#
lProblems = []
#
args = argv[ 1 : ]
#
sReadFile = join( sTempDir, 'LotsOfLinks.txt' )
sOutFile = join( sTempDir, 'UniqueLinks.txt' )
#
if args:
#
sReadFile = args[0]
#
if len( args ) > 1:
#
sOutFile = args[2]
#
#
else:
#
if isFileThere( sReadFile ):
#
getUniqueLinks( sReadFile, sOutFile )
#
else:
#
print3( 'Usage: WantLinks [inputFile [, outputFile] ]' )
print3( 'default inputFile {temp dir}lotsolinks.txt' )
print3( 'default outputFile {temp dir}UniqueLinks.txt' )
#
#
#
if False:
#
lProblems.append( 'getDotQuad4IspTester()' )
#
#
#
sayTestResult( lProblems )
|
wkentaro/termsaver
|
termsaverlib/screen/urlfetcher.py
|
Python
|
apache-2.0
| 2,181
| 0.001834
|
###############################################################################
#
# file: urlfetcher.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Simple screensaver that displays data from a URL.
See additional information in the class itself.
The screen class available here is:
* `UrlFetcherScreen`
"""
#
# Internal modules
#
from termsaverlib.screen.base.urlfetcher import UrlFetcherBase
from termsaverlib import constants
from termsaverlib.i18n import _
class UrlFetcherScreen(UrlFetcherBase):
"""
Simple screensaver that displays data from a URL.
"""
def __init__(self):
"""
Creates a new instance of this class.
"""
UrlFetcherBase.__init__(self,
"urlfetcher",
_("displays url contents with typing animation"))
def _
|
message_no_url(self):
"""
"""
return _("""
You just need to provide the URL fr
|
om where %(app_title)s will read and
display on screen.
If you do not have any idea which URL to use, check out some examples here:
RFC
RFC-1034 - http://tools.ietf.org/rfc/rfc1034.txt
See a RFC list from Wikipedia:
http://en.wikipedia.org/wiki/List_of_RFCs
(remember to use the txt version)
""") % {
'app_title': constants.App.TITLE,
}
|
ajylee/gpaw-rtxs
|
gpaw/test/aluminum_testcell.py
|
Python
|
gpl-3.0
| 1,942
| 0.026262
|
import numpy as np
import sys
import os
import time
from ase import Atom, Atoms
from ase.visualize import view
from ase.units import Bohr
from ase.structure import bulk
from gpaw import GPAW
from gpaw.atom.basis import BasisMaker
from gpaw.response.df import DF
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull
# Ground state calculation
a = 4.043
atoms = bulk('Al', 'fcc', a=a)
atoms.center()
calc = GPAW(gpts=(12,12,12),
kpts=(4,4,4),
xc='LDA')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al1.gpw','all')
# Excited state calculation
q = np.array([1./4.,0.,0.])
w = np.linspace(0, 24, 241)
df = DF(calc='Al1.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_1')
atoms = Atoms('Al8',scaled_positions=[(0,0,0),
(0.5,0,0),
(0,0.5,0),
(0,0,0.5),
(0.5,0.5,0),
(0.5,0,0.5),
(0.,0.5,0.5),
(0.5,0.5,0.5)],
cell=[(0,a,a),(a,0,a),(a,a,0)],
pbc=True)
calc = GPAW(gpts=(24,24,24),
kpts=(2,2,2),
xc='LDA')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al2.gpw','all')
# Excited state calculation
q = np.array([1./2.,0.,0.])
w = np.linspace(0, 24, 241)
df
|
= DF(calc='Al2.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_2')
d1 = np.loadtxt('EELS_Al_1')
d2 = np.loadtxt('EELS_Al_2')
error1 = (d1[1:,1] - d2[1:,1]) / d1[1:,1] * 100
error2 = (d1[1:,2] - d2[1:,2]) / d1[1:,2] * 100
if error1.max() > 0.2 or error2.max()
|
> 0.2: # percent
print error1.max(), error2.max()
raise ValueError('Pls check spectrum !')
#if rank == 0:
# os.remove('Al1.gpw')
# os.remove('Al2.gpw')
|
centrofermi/e3monitor
|
stats/e3sTrackDayStation.py
|
Python
|
gpl-3.0
| 3,202
| 0.004685
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue 31 May 2016
@author: Fabrizio Coccetti (fabrizio.coccetti@centrofermi.it) [www.fc8.net]
Query Run Db and extract several infos
"""
import os
import MySQLdb
from datetime import datetime, timedelta
import ConfigParser
import logging
import logging.config
import calendar
from e3monitor.config.__stations__ import EEE_STATISTIC_STATIONS
from e3monitor.config.__files_server__ import (logConfigFile,
dbConfigFile,
pklStatFile,
pathWorkDir)
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
# Define start of the run and other dates
startRun = datetime(2015, 11, 7)
startRunStr = startRun.strftime("%Y-%m-%d")
# ATTENTION
# endRun must be the day + 1
endRun = datetime(2016, 5, 21)
endRunStr = startRun.strftime("%Y-%m-%d")
#today = datetime.today()
#todayStr = today.strftime("%Y-%m-%d")
# Set up logging
logging.config.fileConfig(logConfigFile)
logger = logging.getLogger('full')
logger.info('Started')
logger = logging.getLogger('plain')
# Open output file
w = open('/var/www/html/monitor/stats/tracks_per_day_per_station.csv', 'w')
logger.info('Opened output file.')
# Adding headers to the output file
w.write('Date' + ',')
for schoolName in EEE_STATISTIC_STATIONS:
w.write(schoolName + ',')
w.write('\n')
# Reading db ini file
logger.info('Reading ' + dbConfigFile)
parser = ConfigParser.ConfigParser()
parser.read(dbConfigFile)
host = parser.get('General', 'host')
user = parser.get('General', 'user')
dbname = parser.get('General', 'dbname')
passwd = parser.get('General', 'passwd')
# Connecting to the database
logger.info('Connecting to %s on %s (as %s)' % (dbname,
|
host, user))
db = MySQLdb.connect(host=host, user=user, passwd=passwd, db=dbname)
cur = db.cursor()
# Query for the number of tracks every month
logger.info('Queries of the total nu
|
mber of Tracks')
query = "SELECT SUM(num_track_events) from runs2 WHERE (run_date = %s) AND station_name = %s;"
logger.info("Exec loop: " + query)
for _lastDay in daterange(startRun, endRun):
_lastDayStr = _lastDay.strftime("%Y-%m-%d")
# writing date to file
w.write(_lastDayStr + ',')
# Loop for each station in Run
for schoolName in EEE_STATISTIC_STATIONS:
queryParam = (_lastDayStr, schoolName)
logger.info('Parameters: ' + str(queryParam))
cur.execute(query, queryParam)
try:
_tracks = int(cur.fetchone()[0])
except:
_tracks = 0
logger.info('School: ' + schoolName + ' Tracks: ' + str(_tracks))
w.write(str(_tracks) + ',')
w.write('\n')
logger.info('Final Result of the queries:\n')
# Save the data extracted from the db
#logger.info('Writing data to file...')
#output = open(os.path.join(pathWorkDir, pklStatFile), 'wb')
#pickle.dump(trackStat, output)
#output.close()
#logger = logging.getLogger('full')
#logger.info('Written ' + os.path.join(pathWorkDir, pklStatFile))
cur.close()
db.close()
w.close()
logger.info('Finished')
|
airbnb/airflow
|
airflow/contrib/operators/emr_terminate_job_flow_operator.py
|
Python
|
apache-2.0
| 1,226
| 0.001631
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy
|
of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is depre
|
cated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`.",
DeprecationWarning,
stacklevel=2,
)
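The shim above only re-exports the provider operator; a brief, hypothetical snippet (the job-flow id and connection id are placeholders) shows the import path the deprecation warning recommends:
# Hypothetical sketch; job_flow_id and aws_conn_id values are placeholders.
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import (
    EmrTerminateJobFlowOperator,
)
terminate_cluster = EmrTerminateJobFlowOperator(
    task_id="terminate_emr_cluster",
    job_flow_id="j-EXAMPLE12345",   # placeholder EMR job-flow id
    aws_conn_id="aws_default",
)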
|
rogerhu/django
|
django/db/models/fields/__init__.py
|
Python
|
bsd-3-clause
| 63,210
| 0.00068
|
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.db import connection
from django.db.models.loading import get_model
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text, force_bytes
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'EmailField', 'Empty', 'Field',
'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
}
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_coun
|
ter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
self.validators = self.default_validators + validators
messages = {}
for c in
|
reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialised into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
|