| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| SEL-Columbia/commcare-hq | corehq/apps/users/util.py | 1 | 4131 |
import re
from django.conf import settings
from django.contrib.auth.models import User
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from dimagi.utils.couch.database import get_db
from django.core.cache import cache
from django_prbac.exceptions import PermissionDenied
from django_prbac.utils import ensure_request_has_privilege
WEIRD_USER_IDS = ['commtrack-system', 'demo_user']
def cc_user_domain(domain):
sitewide_domain = settings.HQ_ACCOUNT_ROOT
return ("%s.%s" % (domain, sitewide_domain)).lower()
def format_username(username, domain):
return "%s@%s" % (username.lower(), cc_user_domain(domain))
def normalize_username(username, domain=None):
from django.core.validators import validate_email
username = re.sub(r'\s+', '.', username).lower()
if domain:
username = format_username(username, domain)
validate_email(username)
else:
# if no domain, make sure that the username is a valid "local part" of an email address
validate_email("%s@dimagi.com" % username)
return username
def raw_username(username):
"""
Strips the @domain.commcarehq.org from the username if it's there
"""
sitewide_domain = settings.HQ_ACCOUNT_ROOT
username = username.lower()
try:
u, d = username.split("@")
except Exception:
return username
if d.endswith('.' + sitewide_domain):
return u
else:
return username
def user_id_to_username(user_id):
from corehq.apps.users.models import CouchUser
if not user_id:
return user_id
elif user_id == "demo_user":
return "demo_user"
try:
login = CouchUser.get_db().get(user_id)
except ResourceNotFound:
return None
return raw_username(login['username']) if "username" in login else None
def cached_user_id_to_username(user_id):
if not user_id:
return None
key = 'user_id_username_cache_{id}'.format(id=user_id)
ret = cache.get(key)
if ret:
return ret
else:
ret = user_id_to_username(user_id)
cache.set(key, ret)
return ret
def django_user_from_couch_id(id):
"""
From a couch id of a profile object, get the django user
"""
# get the couch doc
couch_rep = get_db().get(id)
django_id = couch_rep["django_user"]["id"]
return User.objects.get(id=django_id)
def doc_value_wrapper(doc_cls, value_cls):
"""
Wrap both the doc and the value
Code copied from couchdbkit.schema.base.QueryMixin.__view
"""
#from corehq.apps.users.models import CouchUser
def wrapper(row):
data = row.get('value')
docid = row.get('id')
doc = row.get('doc')
data['_id'] = docid
if 'rev' in data:
data['_rev'] = data.pop('rev')
value_cls._allow_dynamic_properties = True
doc_cls._allow_dynamic_properties = True
value_inst = value_cls.wrap(data)
doc_inst = doc_cls.wrap(doc)
return doc_inst, value_inst
return wrapper
def user_data_from_registration_form(xform):
"""
Helper function for create_or_update_from_xform
"""
user_data = {}
if "user_data" in xform.form and "data" in xform.form["user_data"]:
items = xform.form["user_data"]["data"]
if not isinstance(items, list):
items = [items]
for item in items:
user_data[item["@key"]] = item["#text"]
return user_data
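# Illustrative note (not in the original module): given the parsing above, an
# xform whose form payload looks like
#   {'user_data': {'data': [{'@key': 'dob', '#text': '1990-01-01'},
#                           {'@key': 'language', '#text': 'en'}]}}
# would yield {'dob': '1990-01-01', 'language': 'en'}; a single item dict
# instead of a list is also accepted because of the isinstance check.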
def can_add_extra_mobile_workers(request):
from corehq.apps.users.models import CommCareUser
from corehq.apps.accounting.models import BillingAccount
num_mobile_workers = CommCareUser.total_by_domain(request.domain)
user_limit = request.plan.user_limit
if user_limit == -1 or num_mobile_workers < user_limit:
return True
try:
ensure_request_has_privilege(request, privileges.ALLOW_EXCESS_USERS)
except PermissionDenied:
account = BillingAccount.get_account_by_domain(request.domain)
if account is None or account.date_confirmed_extra_charges is None:
return False
return True
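# Illustrative sketch (not part of the original module): how the username
# helpers above compose, assuming settings.HQ_ACCOUNT_ROOT is 'commcarehq.org'.
# >>> format_username('Maria', 'demo')
# 'maria@demo.commcarehq.org'
# >>> raw_username('maria@demo.commcarehq.org')
# 'maria'
# >>> normalize_username('maria lopez', 'demo')
# 'maria.lopez@demo.commcarehq.org'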
| bsd-3-clause | 3,465,629,854,168,599 | 28.719424 | 95 | 0.648995 | false | 3.592174 | false | false | false |
| richhorace/docker-getpocket-elastic | scripts/reprocess-pocket-raw-data.py | 1 | 2316 |
#!/usr/bin/env python3
import argparse
import glob
import json
import logging
import datetime
import os
from local import DATA_DIR, LOG_DIR, REPROCESS_DIR
def parse_files(fnames):
for fname in fnames:
stat = os.stat(fname)
f_date = str(datetime.datetime.utcfromtimestamp(stat.st_birthtime).isoformat())
data = read_file(fname)
parse_data(data, fname, f_date)
def read_file(fname):
with open(fname, 'r', encoding='utf-8') as f:
return json.load(f)
def parse_data(data, fname, f_date):
LOG_PATH = '{}/getpocket-reprocessed.log'.format(LOG_DIR)
logging.basicConfig(level=logging.INFO,
format='{"retrieved": "' + f_date +'", "level": "%(levelname)s", %(message)s}',
filename=LOG_PATH,
filemode='a+')
total = 0
resolved_id_missing = 0
for v in data['list'].values():
fn = {"filename": fname }
v.update(fn)
# Remove unnecessary data
if v.get('image'):
del v['image']
if v.get('images'):
del v['images']
if v.get('videos'):
del v['videos']
if v.get('resolved_id', 0) == 0:
resolved_id_missing += 1
logging.error('"pocket_data": {}'.format(json.dumps(v)))
# logging.error('"pocket_data": {}, "filename": {}'.format(json.dumps(v)))
continue
if v.get('authors'):
try:
author_data = v['authors'].values()
v['authors'] = [(a['name']) for a in author_data]
except BaseException:
print(v['authors'])
if v.get('tags'):
try:
tag_data = v['tags'].keys()
v['tags'] = [a for a in tag_data]
except BaseException:
print(v['tags'])
fn = {"filename": fname }
v.update(fn)
logging.info('"pocket_data": {}'.format(json.dumps(v)))
total += 1
print("Total ({}): {}".format(fname, total))
print("Missing Resolved Id ({}): {}".format(fname, resolved_id_missing))
def main():
# Get local JSON file names
file_names = glob.glob('{}/*.json'.format(REPROCESS_DIR))
# Parse all JSON files
parse_files(file_names)
main()
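# Illustrative note (not in the original script): DATA_DIR, LOG_DIR and
# REPROCESS_DIR are expected to come from a local.py next to this script,
# e.g. REPROCESS_DIR = '/data/pocket/raw' (placeholder path). The script is run
# without arguments; each *.json file found there is parsed and its records are
# appended as JSON lines to LOG_DIR/getpocket-reprocessed.log.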
| mit | 6,101,022,340,132,048,000 | 26.903614 | 103 | 0.525475 | false | 3.784314 | false | false | false |
| WmHHooper/aima-python | submissions/Martinez/vacuum2.py | 1 | 1449 |
import agents as ag
def HW2Agent() -> object:
def program(percept):
bump, status = percept
if status == 'Dirty':
action = 'Suck'
else:
lastBump, lastStatus = program.oldPercepts[-1]
lastAction = program.oldActions[-1]
if bump == 'None':
action = 'Left'
if bump != 'None':
action = 'Right'
if bump != 'None' and lastAction == 'Left':
action = 'Right'
if bump != 'None' and lastAction == 'Right':
action = 'Down'
if bump != 'None' and lastAction == 'Down':
action = 'Up'
if bump == 'None' and lastAction == 'Down':
action = 'Down'
if bump == 'None' and lastAction == 'Right':
action = 'Right'
if bump == 'None' and lastAction == 'Left':
action = 'Right'
if bump != 'None' and lastAction == 'Left':
action = 'Up'
#it says local variable might be referenced before assignment?
program.oldPercepts.append(percept)
program.oldActions.append(action)
return action
# assign static variables here
program.oldPercepts = [('None', 'Clean')]
program.oldActions = ['NoOp']
agt = ag.Agent(program)
# assign class attributes here:
# agt.direction = ag.Direction('left')
return agt
| mit | -7,472,466,343,766,180,000 | 26.884615 | 62 | 0.507246 | false | 4.187861 | false | false | false |
| Novartis/railroadtracks | src/test/test_recipe.py | 1 | 26005 |
# Copyright 2014-2015 Novartis Institutes for Biomedical Research
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, tempfile, csv, shutil, logging
from railroadtracks import core, hortator, rnaseq, easy, environment
from railroadtracks.model.simulate import (PHAGEGFF,
PHAGEGTF,
PHAGEFASTA)
import railroadtracks.model.simulate
from railroadtracks.test.test_model import (_build_UpToAlign,
_build_StepIndex,
_build_StepAlign,
_build_StepQuantify)
# Test the writing of recipes
from railroadtracks import easy
class RecipeTestCase(unittest.TestCase):
def setUp(self):
# -- recipe-init-begin
# -- initialization boilerplate code
wd = tempfile.mkdtemp()
project = easy.Project(rnaseq, wd=wd)
# declare the 3rd-party command-line tools we will use
env = easy.Environment(rnaseq)
# -- recipe-init-end
# -- recipe-data-begin
# Phage genome shipped with the package for testing purposes
PHAGEFASTA = railroadtracks.model.simulate.PHAGEFASTA
PHAGEGFF = railroadtracks.model.simulate.PHAGEGFF
# create random data for 6 samples (just testing here)
nsamples = 6
samplereads = list()
with open(PHAGEFASTA) as fasta_fh:
reference = next(railroadtracks.model.simulate.readfasta_iter(fasta_fh))
for sample_i in range(nsamples):
read1_fh = tempfile.NamedTemporaryFile(prefix='read1', suffix='.fq')
read2_fh = tempfile.NamedTemporaryFile(prefix='read2', suffix='.fq')
read1_fh, read2_fh = railroadtracks.model.simulate.randomPEreads(read1_fh,
read2_fh,
reference)
samplereads.append((read1_fh, read2_fh))
sampleinfo_fh = tempfile.NamedTemporaryFile(suffix='.csv', mode='w+')
csv_w = csv.writer(sampleinfo_fh)
csv_w.writerow(['sample_id', 'group'])
for i in range(6):
csv_w.writerow([str(i), ('A','B')[i%2]])
sampleinfo_fh.flush()
referenceannotation = rnaseq.GFFFile(PHAGEGFF)
# -- recipe-data-end
self._wd = wd
self.project = project
self.reference_fn = PHAGEFASTA
self.env = env
self.nsamples = nsamples
self.samplereads = samplereads
self.sampleinfo_fh = sampleinfo_fh
self.referenceannotation = referenceannotation
self._PHAGEFASTA = PHAGEFASTA
self._PHAGEGFF = PHAGEGFF
def tearDown(self):
samplereads = self.samplereads
# -- recipe-teardown-begin
for read1_fh, read2_fh in self.samplereads:
read1_fh.close()
read2_fh.close()
# FIXME: delete the temporary directory
shutil.rmtree(self.project.wd)
# -- recipe-teardown-end
def test_File(self):
#FIXME: rather test it in the model ?
reference = core.File(self.reference_fn)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimpleIncremental(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
from railroadtracks import easy
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index,
assets)
# the step is not done
self.assertEqual(hortator._TASK_TODO, task_index.info[1])
torun.append(task_index)
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
task.status = hortator._TASK_DONE
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
# now that the tasks have run let's open the same project
project_same = easy.Project(project.model, wd=project.wd)
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index_same = project_same.add_task(bowtie2index,
assets)
self.assertNotEqual(task_index, task_index_same)
self.assertNotEqual(task_index.call.assets, task_index_same.call.assets)
self.assertListEqual(list(task_index.call.assets.source.reference),
list(task_index_same.call.assets.source.reference))
self.assertListEqual(list(task_index.call.assets.target.indexfilepattern),
list(task_index_same.call.assets.target.indexfilepattern))
self.assertEqual(hortator._TASK_DONE, task_index_same.info[1])
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
def _recipesimpleincremental(self, runtasks):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
for iteration in range(5):
nextiteration = False
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index, assets)
torun.append(task_index)
if iteration < 1:
nextiteration = True
runtasks(torun)
self.assertEqual(1, project.persistent_graph.nconcrete_steps)
continue
# process all samples
sample_counts = list()
for sample_i, (read1_fh, read2_fh) in enumerate(samplereads):
# align
Assets = bowtie2align.Assets
assets = Assets(Assets.Source(task_index.call.assets.target.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(bowtie2align, assets)
torun.append(task_align)
if iteration < 2:
nextiteration = True
runtasks(torun)
self.assertEqual(1+(sample_i+1), project.persistent_graph.nconcrete_steps)
continue
# quantify
# (non-default parameters to fit our demo GFF)
params = rnaseq.HTSeqCount._noexons_parameters
Assets = htseqcount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(htseqcount,
assets,
parameters=params)
torun.append(task_quantify)
if iteration < 3:
nextiteration = True
runtasks(torun)
self.assertEqual(1+len(samplereads)+(sample_i+1),
project.persistent_graph.nconcrete_steps)
continue
# keep a pointer to the counts, as we will use it in the merge step
sample_counts.append(task_quantify.call.assets)
if nextiteration:
continue
# merge the sample data into a table (so differential expression can be computed)
Assets = merge.Assets
counts = tuple(x.target.counts for x in sample_counts)
assets = Assets(Assets.Source(rnaseq.CSVFileSequence(counts)),
merge.Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets,
parameters=("0", "1"))
torun.append(task_merge)
if iteration < 4:
nextiteration = True
runtasks(torun)
self.assertEqual(1+2*len(samplereads)+1,
project.persistent_graph.nconcrete_steps)
continue
# differential expression with edgeR
Assets = edger.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
rnaseq.CSVFile(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(edger,
assets)
if iteration < 5:
nextiteration = True
runtasks(torun)
self.assertEqual(1+2*len(samplereads)+2, # 1 index + 2 FASTQ per sample + 1 merge + 1 differential expression
project.persistent_graph.nconcrete_steps)
continue
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimpleIncrementalComplete(self):
def runtasks(torun):
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
self._recipesimpleincremental(runtasks)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimpleIncrementalCompleteNoRun(self):
def runtasks(torun):
# do nothing
pass
self._recipesimpleincremental(runtasks)
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('htseq-count') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None),
'bowtie2, htseq-count, R (with package "edgeR") must be in the PATH')
def test_RecipeSimple(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# -- recipesimple-test-begin
# steps used
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
htseqcount = env.activities.QUANTIFY.htseqcount
merge = env.activities.UTILITY.columnmerger
edger = env.activities.DIFFEXP.edger
from railroadtracks import easy
# sequence of tasks to run
torun = list()
# index for alignment
Assets = bowtie2index.Assets
assets = Assets(Assets.Source(rnaseq.FASTAFile(reference_fn)),
Assets.Target.createundefined())
task_index = project.add_task(bowtie2index, assets)
torun.append(task_index)
# process all samples
sample_counts = list()
for read1_fh, read2_fh in samplereads:
# align
Assets = bowtie2align.Assets
assets = Assets(Assets.Source(task_index.call.assets.target.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(bowtie2align, assets)
torun.append(task_align)
# quantify
# (non-default parameters to fit our demo GFF)
params = rnaseq.HTSeqCount._noexons_parameters
Assets = htseqcount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(htseqcount,
assets,
parameters=params)
torun.append(task_quantify)
# keep a pointer to the counts,
# as we will use them in the merge step
sample_counts.append(task_quantify.call.assets)
# merge the sample data into a table
# (so differential expression can be computed)
Assets = merge.Assets
counts = tuple(x.target.counts for x in sample_counts)
assets = Assets(Assets.Source(rnaseq.CSVFileSequence(counts)),
merge.Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets,
parameters=("0","1"))
torun.append(task_merge)
# differential expression with edgeR
Assets = edger.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
rnaseq.CSVFile(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(edger,
assets)
# run the tasks
for task in torun:
# run only if not done
if task.info[1] != hortator._TASK_DONE:
task.execute()
# get results
final_storedentities = project.get_targetsofactivity(rnaseq.ACTIVITY.DIFFEXP)
# get the step that created the results files
final_steps = list()
for stored_entity in final_storedentities:
final_steps.append(project.persistent_graph.get_parenttask_of_storedentity(stored_entity))
# -- recipesimple-test-end
self.assertEqual(1, len(final_storedentities))
self.assertEqual(core.File.__name__, final_storedentities[0].clsname)
self.assertEqual('railroadtracks.model.diffexp.EdgeR', final_steps[0].clsname)
# FIXME: not yet implemented
# now that we have all steps, we "only" have to run them
#steps = todo.stepcrawler()
#for s in steps:
# print('%s' % (s.unifiedname))
# s.run()
@unittest.skipIf(not (environment.Executable.ispresent('bowtie2-build') and \
environment.Executable.ispresent('bowtie-build') and \
environment.Executable.ispresent('STAR') and \
environment.Executable.ispresent('R') and \
environment.R('R').packageversion_or_none('edgeR') is not None and \
environment.R('R').packageversion_or_none('DESeq') is not None and \
environment.R('R').packageversion_or_none('DESeq2') is not None and \
environment.R('R').packageversion_or_none('limma') is not None),
'bowtie2, bowtie, STAR, TopHat2, and R (with packages "edgeR", "DESeq", "DESeq2", "limma") must be in the PATH')
def test_RecipeLoop(self):
project = self.project
env = self.env
nsamples = self.nsamples
samplereads = self.samplereads
sampleinfo_fh = self.sampleinfo_fh
reference_fn = self.reference_fn
referenceannotation = self.referenceannotation
PHAGEFASTA = self._PHAGEFASTA
PHAGEGFF = self._PHAGEGFF
# -- recipeloop-test-begin
from railroadtracks import easy
torun = list()
# bowtie
bowtie1index = env.activities.INDEX.bowtiebuild
bowtie1align = env.activities.ALIGN.bowtie
Assets = bowtie1index.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_bowtie1 = project.add_task(bowtie1index,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_bowtie1)
# bowtie2
bowtie2index = env.activities.INDEX.bowtie2build
bowtie2align = env.activities.ALIGN.bowtie2
Assets = bowtie2index.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_bowtie2 = project.add_task(bowtie2index,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_bowtie2)
# STAR
starindex = env.activities.INDEX.starindex
staralign = env.activities.ALIGN.staralign
Assets = starindex.Assets
fa_file = rnaseq.FASTAFile(reference_fn)
task_index_star = project.add_task(starindex,
Assets(Assets.Source(fa_file),
None))
torun.append(task_index_star)
# TopHat2
# (index from bowtie2 used)
#tophat2 = env.activities.ALIGN.tophat2
# featureCount
featurecount = env.activities.QUANTIFY.featurecount
# Merge columns (obtained from counting)
merge = env.activities.UTILITY.columnmerger
# EdgeR, DESeq, DESeq2, and LIMMA voom
edger = env.activities.DIFFEXP.edger
deseq = env.activities.DIFFEXP.deseq
deseq2 = env.activities.DIFFEXP.deseq2
voom = env.activities.DIFFEXP.limmavoom
# Now explore the different alignment presets in bowtie2, and vanilla star
from itertools import cycle
from collections import namedtuple
Options = namedtuple('Options', 'aligner assets_index parameters')
# Try various presets for bowtie2
bowtie2_parameters = (('--very-fast', ), ('--fast', ),
('--sensitive', ), ('--very-sensitive', ))
options = [Options(*x) for x in zip(cycle((bowtie2align,)),
cycle((task_index_bowtie2.call.assets.target,)),
bowtie2_parameters)]
# add bowtie
options.append(Options(bowtie1align, task_index_bowtie1.call.assets.target, tuple()))
# add STAR (vanilla, no specific options beside the size of index k-mers)
options.append(Options(staralign,
task_index_star.call.assets.target,
('--genomeChrBinNbits', '12')))
# add TopHat2
#options.append(Options(tophat2, task_index_bowtie2.call.assets.target, tuple()))
# loop over the options
for option in options:
sample_counts = list()
# loop over the samples
for sample_i in range(nsamples):
read1_fh, read2_fh = samplereads[sample_i]
# align
Assets = option.aligner.Assets
assets = Assets(Assets.Source(option.assets_index.indexfilepattern,
rnaseq.FASTQPossiblyGzipCompressed(read1_fh.name),
rnaseq.FASTQPossiblyGzipCompressed(read2_fh.name)),
Assets.Target.createundefined())
task_align = project.add_task(option.aligner,
assets,
parameters=option.parameters)
torun.append(task_align)
# quantify
# (non-default parameters to fit our demo GFF)
Assets = featurecount.Assets
assets = Assets(Assets.Source(task_align.call.assets.target.alignment,
rnaseq.GFFFile(referenceannotation)),
Assets.Target.createundefined())
task_quantify = project.add_task(featurecount,
assets,
parameters = ('--gtf-featuretype', 'CDS',
'--gtf-attrtype', 'ID'))
torun.append(task_quantify)
# keep a pointer to the counts, as we will use it in the merge step
sample_counts.append(task_quantify.call.assets)
# merge the sample data into a table (so differential expression can be computed)
Assets = merge.Assets
source = Assets.Source(rnaseq.CSVFileSequence(tuple(x.target.counts\
for x in sample_counts)))
assets_merge = Assets(source,
Assets.Target.createundefined())
task_merge = project.add_task(merge,
assets_merge,
parameters=("0","1"))
torun.append(task_merge)
# differential expression with edgeR, deseq2, and voom
# (deseq is too whimsical for tests)
for diffexp, params in ((edger, ()),
(deseq, ('--dispersion-fittype=local', )),
(deseq2, ()),
(voom, ())):
Assets = diffexp.Assets
assets = Assets(Assets.Source(task_merge.call.assets.target.counts,
core.File(sampleinfo_fh.name)),
Assets.Target.createundefined())
task_de = project.add_task(diffexp,assets)
torun.append(task_de)
# run the tasks
# (this is an integration test rather than a unit test - the
# 3rd-party tools are often brittle and we want to keep the noise level down)
env_log_level = environment.logger.level
environment.logger.level = logging.ERROR
try:
for task in torun:
if task.info[1] != hortator._TASK_DONE:
try:
task.execute()
status = easy.hortator._TASK_DONE
except:
status = easy.hortator._TASK_FAILED
project.persistent_graph.step_concrete_state(hortator.DbID(task.task_id, False),
easy.hortator._TASK_STATUS_LIST[status])
finally:
environment.logger.level = env_log_level
# -- recipeloop-test-end
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,608,266,947,258,619,000 | 44.304878 | 133 | 0.55051 | false | 4.339953 | true | false | false |
| smurn/twistit | twistit/test_testing.py | 1 | 1640 |
# Copyright (C) 2015 Stefan C. Mueller
import unittest
from twisted.internet import defer
import twistit
class TestExtract(unittest.TestCase):
def test_success(self):
d = defer.succeed(42)
self.assertEqual(42, twistit.extract(d))
def test_fail(self):
d = defer.fail(ValueError())
self.assertRaises(ValueError, twistit.extract, d)
def test_not_called(self):
d = defer.Deferred()
self.assertRaises(twistit.NotCalledError, twistit.extract, d)
class TestExtractFailure(unittest.TestCase):
def test_success(self):
d = defer.succeed(42)
self.assertRaises(ValueError, twistit.extract_failure, d)
def test_fail(self):
d = defer.fail(ValueError())
f = twistit.extract_failure(d)
self.assertTrue(f.check(ValueError))
def test_not_called(self):
d = defer.Deferred()
self.assertRaises(twistit.NotCalledError, twistit.extract_failure, d)
class TestHasValue(unittest.TestCase):
def test_success(self):
d = defer.succeed(None)
self.assertTrue(twistit.has_result(d))
def test_fail(self):
d = defer.fail(ValueError())
self.assertTrue(twistit.has_result(d))
d.addErrback(lambda _:None) # avoid stderr output during test.
def test_notcalled(self):
d = defer.Deferred()
self.assertFalse(twistit.has_result(d))
def test_paused(self):
d = defer.succeed(None)
d.addCallback(lambda _:defer.Deferred())
self.assertFalse(twistit.has_result(d))
| mit | -6,057,147,474,698,230,000 | 28.836364 | 77 | 0.622561 | false | 3.787529 | true | false | false |
| coinbase/coinbase-python | coinbase/wallet/auth.py | 1 | 1718 |
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import hmac
from requests.utils import to_native_string
import time
from requests.auth import AuthBase
class HMACAuth(AuthBase):
def __init__(self, api_key, api_secret, api_version):
self.api_key = api_key
self.api_secret = api_secret
self.api_version = api_version
def __call__(self, request):
timestamp = str(int(time.time()))
message = timestamp + request.method + request.path_url + (request.body or '')
secret = self.api_secret
if not isinstance(message, bytes):
message = message.encode()
if not isinstance(secret, bytes):
secret = secret.encode()
signature = hmac.new(secret, message, hashlib.sha256).hexdigest()
request.headers.update({
to_native_string('CB-VERSION'): self.api_version,
to_native_string('CB-ACCESS-KEY'): self.api_key,
to_native_string('CB-ACCESS-SIGN'): signature,
to_native_string('CB-ACCESS-TIMESTAMP'): timestamp,
})
return request
class OAuth2Auth(AuthBase):
def __init__(self, access_token_getter, api_version):
self.access_token_getter = access_token_getter
self.api_version = api_version
def __call__(self, request):
access_token = self.access_token_getter()
request.headers.update({
to_native_string('CB-VERSION'): self.api_version,
to_native_string('Authorization'): to_native_string('Bearer {}'.format(access_token)),
})
return request
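# Illustrative usage sketch (not part of the original module; the key, secret,
# version string and URL below are placeholders):
#
#   import requests
#   session = requests.Session()
#   session.auth = HMACAuth('my_api_key', 'my_api_secret', '2017-05-19')
#   session.get('https://api.coinbase.com/v2/accounts')
#
# Each request is then signed with CB-ACCESS-SIGN computed over
# timestamp + method + path + body, as implemented in __call__ above.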
| apache-2.0 | -6,887,145,672,861,208,000 | 32.038462 | 98 | 0.633295 | false | 3.976852 | false | false | false |
| aerospike/aerospike-client-python | examples/client/remove_bin.py | 1 | 4429 |
# -*- coding: utf-8 -*-
##########################################################################
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
from __future__ import print_function
import aerospike
import sys
from optparse import OptionParser
##########################################################################
# Options Parsing
##########################################################################
usage = "usage: %prog [options] key bin_names"
optparser = OptionParser(usage=usage, add_help_option=False)
optparser.add_option(
"--help", dest="help", action="store_true",
help="Displays this message.")
optparser.add_option(
"-U", "--username", dest="username", type="string", metavar="<USERNAME>",
help="Username to connect to database.")
optparser.add_option(
"-P", "--password", dest="password", type="string", metavar="<PASSWORD>",
help="Password to connect to database.")
optparser.add_option(
"-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>",
help="Address of Aerospike server.")
optparser.add_option(
"-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
help="Port of the Aerospike server.")
optparser.add_option(
"-n", "--namespace", dest="namespace", type="string", default="test", metavar="<NS>",
help="Namespace of the key.")
optparser.add_option(
"-s", "--set", dest="set", type="string", default="demo", metavar="<SET>",
help="Set of the key.")
(options, args) = optparser.parse_args()
if options.help:
optparser.print_help()
print()
sys.exit(1)
if len(args) < 2:
optparser.print_help()
print()
sys.exit(1)
##########################################################################
# Client Configuration
##########################################################################
config = {
'hosts': [(options.host, options.port)]
}
##########################################################################
# Application
##########################################################################
exitCode = 0
try:
# ----------------------------------------------------------------------------
# Connect to Cluster
# ----------------------------------------------------------------------------
client = aerospike.client(config).connect(
options.username, options.password)
# ----------------------------------------------------------------------------
# Perform Operation
# ----------------------------------------------------------------------------
try:
namespace = options.namespace if options.namespace and options.namespace != 'None' else None
set = options.set if options.set and options.set != 'None' else None
pk = args.pop(0)
bin_names = args
status = client.remove_bin((namespace, set, pk), bin_names)
print("Status of bin removal is: %d" % (status))
print("OK, bins removed from the record at", (namespace, set, pk))
except Exception as exception:
if exception.code == 602:
print("error: Record not found")
else:
print("error: {0}".format(
(exception.code, exception.msg, exception.file, exception.line)), file=sys.stderr)
exitCode = 1
# ----------------------------------------------------------------------------
# Close Connection to Cluster
# ----------------------------------------------------------------------------
client.close()
except Exception as eargs:
print("error: {0}".format(eargs), file=sys.stderr)
exitCode = 3
##########################################################################
# Exit
##########################################################################
sys.exit(exitCode)
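# Example invocation (illustrative; host, namespace, set and key values are placeholders):
#   python remove_bin.py --host 127.0.0.1 --port 3000 -n test -s demo some_key bin1 bin2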
| apache-2.0 | 4,883,970,964,990,964,000 | 32.300752 | 100 | 0.470761 | false | 4.883131 | false | false | false |
| nigelb/gdata-utils | gdata_utils/fs/__init__.py | 1 | 5204 |
# Helper utils for gdata.
#
# Copyright (C) 2012 NigelB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import os
import gdata.media, gdata.client, gdata.docs.data
from gdata_utils.fs.constants import *
from simpleui.utils import UserConfig
class GoogleDocs:
def __init__(self, client, cache_dir):
self.client = client
self.config = UserConfig(dir_name=cache_dir, config_file="cache", def_config_callback=lambda x:{})
self.cache_dir = cache_dir
if not self.config.read_config():
self.config.initialize_dir(None)
def getFolders(self):
folders = self.get_list('/feeds/default/private/full/-/folder')
if not folders.entry:
return None
return [Folder(self, x) for x in folders.entry]
def getFolder(self, descriptor, etag=None):
return Folder(self, self.client.GetResourceById(descriptor[id], etag=etag))
def __cached_entry(self, id):
return os.path.join(self.cache_dir, id)
def get_list(self, url):
feed = self.client.GetResources(uri=url)
if not feed.entry:
return None
if feed.GetNextLink():
feed.entry += self.get_list(feed.GetNextLink().href).entry
return feed
def get_cache_descriptor(self, id):
if self.config.has_key(id): return self.config[id]
return None
def open_cached_file(self, id, **kwargs):
return open(self.__cached_entry(id), **kwargs)
def download(self, id, extra_params=None):
item_etag = None
if self.config.has_key(id):
item_etag = self.config[id][etag]
entry = self.client.GetResourceById(id, etag=item_etag)
self.client.DownloadResource(entry, self.__cached_entry(id), extra_params=extra_params)
self.config[id] = create_descriptor(entry)
self.config.write_config()
def create(self, title, folder_entry, mime_type="text/plain"):
ms = gdata.data.MediaSource(file_handle=StringIO(" "), content_type=mime_type, content_length=1)
entry = gdata.docs.data.Resource(type='file', title=title)
return self.client.CreateResource(entry, media=ms, collection=folder_entry)
def write(self, entry, stream, length, mime_type="text/plain"):
ms = gdata.data.MediaSource(file_handle=stream, content_type=mime_type, content_length=length)
self.client.UpdateResource(entry, media=ms)
def create_descriptor(entry):
return {
title: entry.title.text.encode('UTF-8'),
etag: entry.etag,
id: entry.resource_id.text,
mime: entry.content.type,
}
class GD:
def title(self):
return self.entry.title.text.encode('UTF-8')
def getID(self):
return self.entry.resource_id.text
def createDescriptor(self):
return create_descriptor(self.entry)
def content_type(self):
return self.entry.content.type
class Folder(GD):
def __init__(self, fs, entry):
self.fs = fs
self.entry = entry
def list(self):
feed = self.fs.get_list("%s/%s" % (self.entry.GetSelfLink().href, "contents"))
toRet = []
if feed is None: return toRet
for item in feed.entry:
for category in item.category:
if category.term == folder_type:
toRet.append(Folder(self.fs, item))
elif category.term == file_type:
toRet.append(File(self.fs, item))
return toRet
def __repr__(self):
return self.title()
def create_file(self, name, mime_type="text/plain"):
return File(self.fs, self.fs.create(name, folder_entry=self.entry, mime_type=mime_type))
def get_file(self, name):
for itm in self.list():
if itm.__class__ == File and itm.title() == name:
try:
itm.download()
except gdata.client.NotModified, ne:
pass
return itm
return None
class File(GD):
def __init__(self, fs, entry):
self.fs = fs
self.entry = entry
def getID(self):
return self.entry.resource_id.text
def open(self, **kwargs):
""" Opens the cached contents of this file. **kwargs is passed to the open function."""
return self.fs.open_cached_file(self.getID(), **kwargs)
def write(self, stream, length, mime_type="text/plain"):
self.fs.write(self.entry, stream, length, mime_type=mime_type)
def download(self, extra_params = None):
self.fs.download(self.getID(), extra_params=extra_params)
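# Illustrative usage sketch (not part of the original module; the client and
# cache directory are placeholders, and the client is assumed to be an
# authenticated gdata docs client providing GetResources/GetResourceById etc.):
#
#   docs = GoogleDocs(client, '/tmp/gdata-cache')
#   for folder in docs.getFolders() or []:
#       print folder.title()
#       f = folder.get_file('notes.txt')
#       if f is not None:
#           print f.open().read()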
| gpl-3.0 | -8,418,994,547,592,379,000 | 33.236842 | 106 | 0.63259 | false | 3.733142 | true | false | false |
| saullocastro/pyNastran | pyNastran/utils/__init__.py | 1 | 9338 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from types import MethodType
import os
import io
import sys
from codecs import open as codec_open
from itertools import count
from six import PY2, string_types, iteritems, StringIO
import numpy as np
if PY2:
integer_types = (int, long, np.int32, np.int64)
integer_float_types = (int, long, np.int32, np.int64, float)
else:
integer_types = (int, np.int32, np.int64)
integer_float_types = (int, np.int32, np.int64, float)
def ipython_info():
"""determines if iPython/Jupyter notebook is running"""
ip = False
if 'ipykernel' in sys.modules:
ip = 'notebook'
elif 'Ipython' in sys.modules:
ip = 'terminal'
return ip
def is_file_obj(filename):
"""does this object behave like a file object?"""
#if not (hasattr(out_filename, 'read') and hasattr(out_filename, 'write')) or
# isinstance(out_filename, file) or isinstance(out_filename, StringIO):
return ((hasattr(filename, 'read') and hasattr(filename, 'write'))
or isinstance(filename, file)
or isinstance(filename, StringIO))
def b(string):
"""reimplementation of six.b(...) to work in Python 2"""
return string.encode('latin-1')
def merge_dicts(dict_list, strict=True):
"""merges two or more dictionaries"""
assert isinstance(dict_list, list), type(dict_list)
dict_out = {}
for adict in dict_list:
assert isinstance(adict, dict), adict
for key, value in iteritems(adict):
if key not in dict_out:
dict_out[key] = value
elif strict:
raise RuntimeError('key=%r exists in multiple dictionaries' % key)
else:
print('key=%r is dropped?' % key)
return dict_out
def is_binary_file(filename):
"""
Return true if the given filename is binary.
Parameters
----------
filename : str
the filename to test
Returns
-------
binary_flag : bool
True if filename is a binary file (contains null byte)
and False otherwise.
:raises: IOError if the file cannot be opened.
Based on the idea (.. seealso:: http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text)
that file is binary if it contains null.
.. warning:: this may not work for unicode."""
assert isinstance(filename, string_types), '%r is not a valid filename' % filename
assert os.path.exists(filename), '%r does not exist\n%s' % (filename, print_bad_path(filename))
with io.open(filename, mode='rb') as fil:
for chunk in iter(lambda: fil.read(1024), bytes()):
if b'\0' in chunk: # found null byte
return True
return False
def print_bad_path(path):
"""
Prints information about the existence (access possibility) of the parts
of the given path. Useful for debugging when the path to a given file
is wrong.
Parameters
----------
path : str
path to check
Returns
-------
msg : str
string with informations whether access to parts of the path
is possible
"""
#raw_path = path
if len(path) > 255:
path = os.path.abspath(_filename(path))
npath = os.path.dirname(path)
res = [path]
while path != npath:
path, npath = npath, os.path.dirname(npath)
res.append(path)
msg = {True: 'passed', False: 'failed'}
return '\n'.join(['%s: %s' % (msg[os.path.exists(i)], i[4:]) for i in res])
else:
path = os.path.abspath(path)
npath = os.path.dirname(path)
res = [path]
while path != npath:
path, npath = npath, os.path.dirname(npath)
res.append(path)
msg = {True: 'passed', False: 'failed'}
return '\n'.join(['%s: %s' % (msg[os.path.exists(i)], i) for i in res])
def _filename(filename):
"""
Prepends some magic data to a filename in order to have long filenames.
.. warning:: This might be Windows specific.
"""
if len(filename) > 255:
return '\\\\?\\' + filename
return filename
def __object_attr(obj, mode, keys_to_skip, attr_type):
"""list object attributes of a given type"""
#print('keys_to_skip=%s' % keys_to_skip)
keys_to_skip = [] if keys_to_skip is None else keys_to_skip
test = {
'public': lambda k: (not k.startswith('_') and k not in keys_to_skip),
'private': lambda k: (k.startswith('_') and not k.startswith('__') and k not in keys_to_skip),
'both': lambda k: (not k.startswith('__') and k not in keys_to_skip),
'all': lambda k: (k not in keys_to_skip),
}
if not mode in test:
print('Wrong mode! Accepted modes: public, private, both, all.')
return None
check = test[mode]
out = []
for k in dir(obj):
if k in keys_to_skip:
continue
if check(k) and attr_type(getattr(obj, k)):
out.append(k)
out.sort()
return out
#return sorted([k for k in dir(obj) if (check(k) and
# attr_type(getattr(obj, k)))])
def object_methods(obj, mode='public', keys_to_skip=None):
"""
List the names of methods of a class as strings. Returns public methods
as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of methods will be listed
* "public" - names that do not begin with underscore
* "private" - names that begin with single underscore
* "both" - private and public
* "all" - all methods that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
method : List[str]
sorted list of the names of methods of a given type
or None if the mode is wrong
"""
return __object_attr(obj, mode, keys_to_skip, lambda x: isinstance(x, MethodType))
def object_attributes(obj, mode='public', keys_to_skip=None):
"""
List the names of attributes of a class as strings. Returns public
attributes as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of attributes will be listed
* 'public' - names that do not begin with underscore
* 'private' - names that begin with single underscore
* 'both' - private and public
* 'all' - all attributes that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
attribute_names : List[str]
sorted list of the names of attributes of a given type or None
if the mode is wrong
"""
return __object_attr(obj, mode, keys_to_skip, lambda x: not isinstance(x, MethodType))
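# Illustrative example (not part of the original module): for a small object
#   class Point(object):
#       def __init__(self):
#           self.x = 1.0
#           self._hidden = 2.0
#       def norm(self):
#           return abs(self.x)
# object_attributes(Point()) returns ['x'] and object_methods(Point()) returns
# ['norm'], while mode='private' for attributes would pick up '_hidden' instead.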
#def write_object_attributes(name, obj, nspaces=0, nbase=0, is_class=True, debug=False):
#"""
#Writes a series of nested objects
#"""
#spaces = (nbase + nspaces) * ' '
#msg = spaces
#xml = spaces
#if is_class:
#equals = '='
#else:
#equals = ':'
#if debug:
#print('attr=%s equals=%r' % (name, equals))
## name
#if isinstance(obj, dict):
#if nspaces == 0:
#msg += '%s %s ' % (name, equals)
#else:
#if isinstance(name, tuple):
#msg += '%s %s ' % (str(name), equals)
#else:
#msg += "'%s' %s " % (name, equals)
#elif isinstance(name, string_types):
#if is_class:
#key = '%s' % name
#else:
#key = "'%s'" % name
## elif isinstance(name, unicode):
## if is_class:
## key = u'%s' % name
## else:
## key = "u'%s'" % name
#elif isinstance(name, (int, float, tuple)) or name is None:
#key = '%s' % str(name)
#else:
#raise RuntimeError('key=%s is not a string. Type=%s' % (name, type(name)))
#if debug:
#print('name=%s type=%s' % (name, type(obj)))
## write the object
#if isinstance(obj, (int, float)) or obj is None:
#xml += '<name=%s value=%s type=%s>' % (name, obj, type(obj))
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif is_string(obj):
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif isinstance(obj, dict):
#msg += write_dict(obj, nspaces, nbase, is_class) + ',\n'
#elif isinstance(obj, (tuple, list)):
#msg += '%s %s %s,\n' % (key, equals, write_value(obj, nspaces, nbase, is_class))
#elif isinstance(obj, np.ndarray):
#starter = '%s%s %s' % (nspaces, key, equals)
#msg += '%s %s %s,\n' % (key, equals, write_array(obj, nspaces + 6 + len(starter)))
#else: # generic class
#objectType = obj.__class__.__name__
##raise RuntimeError('objectType=%s is not supported' % objectType)
#msg += '%s %s ' % (key, equals)
#msg += write_class(name, obj, nspaces, nbase) + ',\n' # comma for class
#if nspaces == 0:
#msg = msg[:-2]
#if debug:
#print('%r' % msg)
#return msg
| lgpl-3.0 | -8,305,409,958,395,564,000 | 31.996466 | 112 | 0.57507 | false | 3.641966 | false | false | false |
| SWLBot/electronic-blackboard | board.py | 1 | 2049 |
import tornado.ioloop
import tornado.web
import tornado.httpserver
from tornado.options import define, options, parse_command_line
import os.path
from broadcast_api import load_schedule
import argparse
import config.settings
define('port',default=4000,help='run the server on the given port',type=int)
#define('log_file_prefix',default='board.log',help='log file name',type=str)
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class MainHandler(BaseHandler):
def get(self):
self.set_cookie("_xsrf",self.xsrf_token)
self.render("board.html")
class Get_DB_Data(BaseHandler):
def get(self):
display_content = load_schedule()
if display_content['result'] == 'fail':
pass
elif display_content['display_type'] == 'image':
self.render('show-image.html',img_info=display_content)
elif display_content['display_type'] == 'text':
from tornado.template import Loader
loader = Loader('template')
print(loader.load('show-text.html').generate(text_info=display_content))
self.render('show-text.html', text_info=display_content)
elif display_content['display_type'] == 'news':
self.render('show-news.html', news_info=display_content)
def main():
base_dir = os.path.dirname(__file__)
settings = {
"cookie_secret": config.settings.board['cookie_secret'],
"template_path":os.path.join(base_dir,"template"),
"static_path":os.path.join(base_dir,"static"),
"thumbnail_path":os.path.join(base_dir,"thumbnail"),
"debug":True,
}
application = tornado.web.Application([
tornado.web.url(r"/",MainHandler,name="main"),
tornado.web.url(r"/db_schedule",Get_DB_Data),
],**settings)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| apache-2.0 | -8,772,363,605,874,281,000 | 33.728814 | 84 | 0.65349 | false | 3.691892 | false | false | false |
| denys-duchier/django-wiki-py3 | wiki/core/plugins/registry.py | 1 | 1646 |
# -*- coding: utf-8 -*-
from django.utils.importlib import import_module
_cache = {}
_settings_forms = []
_markdown_extensions = []
_article_tabs = []
_sidebar = []
def register(PluginClass):
"""
Register a plugin class. This function will call back your plugin's
constructor.
"""
if PluginClass in list(_cache.keys()):
raise Exception("Plugin class already registered")
plugin = PluginClass()
_cache[PluginClass] = plugin
settings_form = getattr(PluginClass, 'settings_form', None)
if settings_form:
if isinstance(settings_form, str):
klassname = settings_form.split(".")[-1]
modulename = ".".join(settings_form.split(".")[:-1])
form_module = import_module(modulename)
settings_form = getattr(form_module, klassname)
_settings_forms.append(settings_form)
if getattr(PluginClass, 'article_tab', None):
_article_tabs.append(plugin)
if getattr(PluginClass, 'sidebar', None):
_sidebar.append(plugin)
_markdown_extensions.extend(getattr(PluginClass, 'markdown_extensions', []))
def get_plugins():
"""Get loaded plugins - do not call before all plugins are loaded."""
return _cache
def get_markdown_extensions():
"""Get all markdown extension classes from plugins"""
return _markdown_extensions
def get_article_tabs():
"""Get all article tab dictionaries from plugins"""
return _article_tabs
def get_sidebar():
"""Returns plugin classes that should connect to the sidebar"""
return _sidebar
def get_settings_forms():
return _settings_forms
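# Illustrative sketch (not from the wiki codebase): a minimal class exposing only
# the attributes this registry inspects, which register() above would accept.
#
#   class ExamplePlugin(object):
#       article_tab = None        # set to a truthy value to register an article tab
#       sidebar = None            # set to a truthy value to register a sidebar section
#       settings_form = None      # dotted path string or form class
#       markdown_extensions = []
#
#   register(ExamplePlugin)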
| gpl-3.0 | 4,763,319,060,811,272,000 | 28.945455 | 88 | 0.647631 | false | 4.209719 | false | false | false |
| licon02/pnn | pruebas/prueba04_b.py | 1 | 2843 |
#!/usr/bin/python
# LSTM network test
# plots the input signals and the network response
# sinusoidal input
from __future__ import division
import numpy as np
from pybrain.datasets import SequentialDataSet
from itertools import cycle
from pybrain.supervised import RPropMinusTrainer
from sys import stdout
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
import matplotlib.pyplot as plt
plt.close('all') # close previous figures
#construct target signal:
T = 1 # signal period
Nyq = 20 # minimum of 2 by the Nyquist theorem
Ts = T/Nyq # sampling period
f = 1/T # signal frequency
fs = 1/Ts # sampling frequency
A = 10 # amplitude
Tiempo = 5 # total sampling time
#NN input signal:
t0 = np.arange(0,Tiempo,Ts) # generates a vector from n to N in increments of i (n,N,i)
# value at time t0
# np.sin(Wn*t0), Wn = 2*pi*f, t0 = time instant
data = A*np.cos(2*np.pi*f*t0) # input signal to the network
print 'number of training samples %i' % len(data)
net = buildNetwork(1, 15, 1,hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
ds = SequentialDataSet(1, 1)
for sample, next_sample in zip(data, cycle(data[1:])):
ds.addSample(sample, next_sample)
trainer = RPropMinusTrainer(net, dataset=ds)
train_errors = [] # save errors for plotting later
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
trainer.trainEpochs(EPOCHS_PER_CYCLE)
train_errors.append(trainer.testOnData())
epoch = (i+1) * EPOCHS_PER_CYCLE
#print("\r epoch {}/{}".format(epoch, EPOCHS))
stdout.flush()
print "final error =", train_errors[-1]
y0 = []# sample
y1 = []# network
y2 = []# target
for sample, target in ds.getSequenceIterator(0):
y0.append(sample)
y1.append(net.activate(sample))
y2.append(target)
#print(" sample = %4.1f" % sample)
#print("predicted next sample = %4.1f" % net.activate(sample))
#print(" actual next sample = %4.1f" % target)
fsize=8
t0 = np.arange(0,len(data),1)
fig1 = plt.figure(1)
plt.plot(t0, y1, 'ro',label='network')
plt.plot(t0, y2, 'k',label='target')
plt.xlabel('Time',fontsize=fsize)
plt.ylabel('Amplitude',fontsize=fsize)
plt.grid()
plt.title('Target range = [0,%0.1f]'%len(data),fontsize=fsize)
plt.xlim(1.2*np.min(t0),1.2*np.max(t0))
plt.ylim(1.2*np.min(y1),1.2*np.max(y1))
fig1name = './prueba04_b_fig1.png'
print 'Saving Fig. 1 to:', fig1name
fig1.savefig(fig1name, bbox_inches='tight')
fig2 = plt.figure(2)
plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
fig2name = './prueba04_b_fig2.png'
print 'Saving Fig. 2 to:', fig2name
fig2.savefig(fig2name, bbox_inches='tight')
plt.show()
| gpl-3.0 | 2,559,812,335,112,786,400 | 29.569892 | 90 | 0.687654 | false | 2.593978 | false | false | false |
| google/verible | bazel/build-version.py | 1 | 1343 |
#!/usr/bin/env python3
# Copyright 2020-2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invoke bazel with --workspace_status_command=bazel/build-version.py to get this invoked and populate bazel-out/volatile-status.txt
"""
import os
from subprocess import Popen, PIPE
def run(*cmd):
process = Popen(cmd, stdout=PIPE)
output, _ = process.communicate()
return output.strip().decode()
def main():
try:
date = run("git", "log", "-n1", "--date=short", "--format=%cd")
except:
date = ""
try:
version = run("git", "describe")
except:
version = ""
if not date:
date = os.environ["GIT_DATE"]
if not version:
version = os.environ["GIT_VERSION"]
print("GIT_DATE", '"{}"'.format(date))
print("GIT_DESCRIBE", '"{}"'.format(version))
if __name__ == "__main__":
main()
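# Example invocation (per the module docstring above; the build target pattern is generic):
#   bazel build --workspace_status_command=bazel/build-version.py //...
# GIT_DATE and GIT_DESCRIBE then end up in bazel-out/volatile-status.txt.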
| apache-2.0 | 656,127,483,831,813,100 | 24.826923 | 133 | 0.679821 | false | 3.600536 | false | false | false |
| dimka665/vk | tests/conftest.py | 1 | 1967 |
import requests
from pytest import fixture
from vk.session import APIBase
@fixture(scope='session')
def v():
"""
Actual vk API version
"""
return '5.80'
class Attributable(object):
def set_attrs(self, attributes):
for attr_name, attr_value in attributes.items():
setattr(self, attr_name, attr_value)
class RequestData(Attributable):
def __init__(self, data):
self.set_attrs(data)
def __repr__(self):
return '<RequestData {}>'.format(self.__dict__)
class Request(Attributable):
def __init__(self, method, url, **kwargs):
self.method = method
self.url = url
self.data = RequestData(kwargs.pop('data', {}))
self.set_attrs(kwargs)
class Response(object):
def __init__(self, text='', status_code=200, url=None):
self.text = text
self.status_code = status_code
self.url = url
def raise_for_status(self):
if self.status_code != 200:
raise ValueError(self.status_code)
@fixture
def response_class():
return Response
class MockedSessionBase(requests.Session):
def __init__(self):
super(MockedSessionBase, self).__init__()
self.history = []
self.last_request = None
def request(self, method, url, **kwargs):
self.last_request = Request(method, url, **kwargs)
response = self.mocked_request(method, url, **kwargs)
if not response:
raise NotImplementedError
return response
@fixture
def session_class():
return MockedSessionBase
@fixture
def mock_requests_session(monkeypatch):
class MockedSession(MockedSessionBase):
def mocked_request(self, verb, url, **kwargs):
if verb == 'POST':
if url.startswith(APIBase.API_URL):
# method = url[len(vk.Session.API_URL):]
return Response()
monkeypatch.setattr('requests.Session', MockedSession)
| mit | 8,648,724,391,978,122,000 | 21.101124 | 61 | 0.608541 | false | 3.926148 | false | false | false |
| WadeHsiao/B | 3rd/yuv/libyuv/setup_links.py | 1 | 17186 |
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Setup links to a Chromium checkout for WebRTC.
WebRTC standalone shares a lot of dependencies and build tools with Chromium.
To do this, many of the paths of a Chromium checkout are emulated by creating
symlinks to files and directories. This script handles the setup of symlinks to
achieve this.
It also handles cleanup of the legacy Subversion-based approach that was used
before Chrome switched over their master repo from Subversion to Git.
"""
import ctypes
import errno
import logging
import optparse
import os
import shelve
import shutil
import subprocess
import sys
import textwrap
DIRECTORIES = [
'build',
'buildtools',
'mojo', # TODO(kjellander): Remove, see webrtc:5629.
'native_client',
'net',
'testing',
'third_party/binutils',
'third_party/drmemory',
'third_party/instrumented_libraries',
'third_party/libjpeg',
'third_party/libjpeg_turbo',
'third_party/llvm-build',
'third_party/lss',
'third_party/yasm',
'third_party/WebKit', # TODO(kjellander): Remove, see webrtc:5629.
'tools/clang',
'tools/gn',
'tools/gyp',
'tools/memory',
'tools/python',
'tools/swarming_client',
'tools/valgrind',
'tools/vim',
'tools/win',
]
from sync_chromium import get_target_os_list
target_os = get_target_os_list()
if 'android' in target_os:
DIRECTORIES += [
'base',
'third_party/accessibility_test_framework',
'third_party/android_platform',
'third_party/android_tools',
'third_party/apache_velocity',
'third_party/appurify-python',
'third_party/ashmem',
'third_party/bouncycastle',
'third_party/catapult',
'third_party/closure_compiler',
'third_party/guava',
'third_party/hamcrest',
'third_party/icu',
'third_party/icu4j',
'third_party/ijar',
'third_party/intellij',
'third_party/jsr-305',
'third_party/junit',
'third_party/libxml',
'third_party/mockito',
'third_party/modp_b64',
'third_party/ow2_asm',
'third_party/protobuf',
'third_party/requests',
'third_party/robolectric',
'third_party/sqlite4java',
'tools/android',
'tools/grit',
]
if 'ios' in target_os:
DIRECTORIES.append('third_party/class-dump')
FILES = {
'tools/isolate_driver.py': None,
'third_party/BUILD.gn': None,
}
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CHROMIUM_CHECKOUT = os.path.join('chromium', 'src')
LINKS_DB = 'links'
# Version management to make future upgrades/downgrades easier to support.
SCHEMA_VERSION = 1
def query_yes_no(question, default=False):
"""Ask a yes/no question via raw_input() and return their answer.
Modified from http://stackoverflow.com/a/3041990.
"""
prompt = " [%s/%%s]: "
prompt = prompt % ('Y' if default is True else 'y')
prompt = prompt % ('N' if default is False else 'n')
if default is None:
default = 'INVALID'
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if choice == '' and default != 'INVALID':
return default
if 'yes'.startswith(choice):
return True
elif 'no'.startswith(choice):
return False
print "Please respond with 'yes' or 'no' (or 'y' or 'n')."
# Actions
class Action(object):
def __init__(self, dangerous):
self.dangerous = dangerous
def announce(self, planning):
"""Log a description of this action.
Args:
planning - True iff we're in the planning stage, False if we're in the
doit stage.
"""
pass
def doit(self, links_db):
"""Execute the action, recording what we did to links_db, if necessary."""
pass
class Remove(Action):
def __init__(self, path, dangerous):
super(Remove, self).__init__(dangerous)
self._priority = 0
self._path = path
def announce(self, planning):
log = logging.warn
filesystem_type = 'file'
if not self.dangerous:
log = logging.info
filesystem_type = 'link'
if planning:
log('Planning to remove %s: %s', filesystem_type, self._path)
else:
log('Removing %s: %s', filesystem_type, self._path)
def doit(self, _):
os.remove(self._path)
class Rmtree(Action):
def __init__(self, path):
super(Rmtree, self).__init__(dangerous=True)
self._priority = 0
self._path = path
def announce(self, planning):
if planning:
logging.warn('Planning to remove directory: %s', self._path)
else:
logging.warn('Removing directory: %s', self._path)
def doit(self, _):
if sys.platform.startswith('win'):
# shutil.rmtree() doesn't work on Windows if any of the directories are
# read-only, which svn repositories are.
subprocess.check_call(['rd', '/q', '/s', self._path], shell=True)
else:
shutil.rmtree(self._path)
class Makedirs(Action):
def __init__(self, path):
super(Makedirs, self).__init__(dangerous=False)
self._priority = 1
self._path = path
def doit(self, _):
try:
os.makedirs(self._path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Symlink(Action):
def __init__(self, source_path, link_path):
super(Symlink, self).__init__(dangerous=False)
self._priority = 2
self._source_path = source_path
self._link_path = link_path
def announce(self, planning):
if planning:
logging.info(
'Planning to create link from %s to %s', self._link_path,
self._source_path)
else:
logging.debug(
'Linking from %s to %s', self._link_path, self._source_path)
def doit(self, links_db):
# Files not in the root directory need relative path calculation.
# On Windows, use absolute paths instead since NTFS doesn't seem to support
# relative paths for symlinks.
if sys.platform.startswith('win'):
source_path = os.path.abspath(self._source_path)
else:
if os.path.dirname(self._link_path) != self._link_path:
source_path = os.path.relpath(self._source_path,
os.path.dirname(self._link_path))
os.symlink(source_path, os.path.abspath(self._link_path))
links_db[self._source_path] = self._link_path
class LinkError(IOError):
"""Failed to create a link."""
pass
# Handles symlink creation on the different platforms.
if sys.platform.startswith('win'):
def symlink(source_path, link_path):
flag = 1 if os.path.isdir(source_path) else 0
if not ctypes.windll.kernel32.CreateSymbolicLinkW(
unicode(link_path), unicode(source_path), flag):
raise OSError('Failed to create symlink to %s. Notice that only NTFS '
'version 5.0 and up has all the needed APIs for '
'creating symlinks.' % source_path)
os.symlink = symlink
class WebRTCLinkSetup(object):
def __init__(self, links_db, force=False, dry_run=False, prompt=False):
self._force = force
self._dry_run = dry_run
self._prompt = prompt
self._links_db = links_db
def CreateLinks(self, on_bot):
logging.debug('CreateLinks')
# First, make a plan of action
actions = []
for source_path, link_path in FILES.iteritems():
actions += self._ActionForPath(
source_path, link_path, check_fn=os.path.isfile, check_msg='files')
for source_dir in DIRECTORIES:
actions += self._ActionForPath(
source_dir, None, check_fn=os.path.isdir,
check_msg='directories')
if not on_bot and self._force:
# When making the manual switch from legacy SVN checkouts to the new
# Git-based Chromium DEPS, the .gclient_entries file that contains cached
# URLs for all DEPS entries must be removed to avoid future sync problems.
entries_file = os.path.join(os.path.dirname(ROOT_DIR), '.gclient_entries')
if os.path.exists(entries_file):
actions.append(Remove(entries_file, dangerous=True))
actions.sort()
if self._dry_run:
for action in actions:
action.announce(planning=True)
logging.info('Not doing anything because dry-run was specified.')
sys.exit(0)
if any(a.dangerous for a in actions):
logging.warn('Dangerous actions:')
for action in (a for a in actions if a.dangerous):
action.announce(planning=True)
print
if not self._force:
logging.error(textwrap.dedent("""\
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
                            A C T I O N     R E Q U I R E D
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Because chromium/src is transitioning to Git (from SVN), we needed to
change the way that the WebRTC standalone checkout works. Instead of
individually syncing subdirectories of Chromium in SVN, we're now
syncing Chromium (and all of its DEPS, as defined by its own DEPS file),
into the `chromium/src` directory.
As such, all Chromium directories which are currently pulled by DEPS are
now replaced with a symlink into the full Chromium checkout.
To avoid disrupting developers, we've chosen to not delete your
directories forcibly, in case you have some work in progress in one of
them :).
ACTION REQUIRED:
Before running `gclient sync|runhooks` again, you must run:
%s%s --force
Which will replace all directories which now must be symlinks, after
prompting with a summary of the work-to-be-done.
"""), 'python ' if sys.platform.startswith('win') else '', sys.argv[0])
sys.exit(1)
elif self._prompt:
if not query_yes_no('Would you like to perform the above plan?'):
sys.exit(1)
for action in actions:
action.announce(planning=False)
action.doit(self._links_db)
if not on_bot and self._force:
logging.info('Completed!\n\nNow run `gclient sync|runhooks` again to '
'let the remaining hooks (that probably were interrupted) '
'execute.')
def CleanupLinks(self):
logging.debug('CleanupLinks')
for source, link_path in self._links_db.iteritems():
if source == 'SCHEMA_VERSION':
continue
if os.path.islink(link_path) or sys.platform.startswith('win'):
# os.path.islink() always returns false on Windows
# See http://bugs.python.org/issue13143.
logging.debug('Removing link to %s at %s', source, link_path)
if not self._dry_run:
if os.path.exists(link_path):
if sys.platform.startswith('win') and os.path.isdir(link_path):
subprocess.check_call(['rmdir', '/q', '/s', link_path],
shell=True)
else:
os.remove(link_path)
del self._links_db[source]
@staticmethod
def _ActionForPath(source_path, link_path=None, check_fn=None,
check_msg=None):
"""Create zero or more Actions to link to a file or directory.
This will be a symlink on POSIX platforms. On Windows this requires
that NTFS is version 5.0 or higher (Vista or newer).
Args:
source_path: Path relative to the Chromium checkout root.
For readability, the path may contain slashes, which will
automatically be converted to the right path delimiter on Windows.
link_path: The location for the link to create. If omitted it will be the
same path as source_path.
check_fn: A function returning true if the type of filesystem object is
correct for the attempted call. Otherwise an error message with
check_msg will be printed.
check_msg: String used to inform the user of an invalid attempt to create
a file.
Returns:
A list of Action objects.
"""
def fix_separators(path):
if sys.platform.startswith('win'):
return path.replace(os.altsep, os.sep)
else:
return path
assert check_fn
assert check_msg
link_path = link_path or source_path
link_path = fix_separators(link_path)
source_path = fix_separators(source_path)
source_path = os.path.join(CHROMIUM_CHECKOUT, source_path)
if os.path.exists(source_path) and not check_fn:
raise LinkError('_LinkChromiumPath can only be used to link to %s: '
'Tried to link to: %s' % (check_msg, source_path))
if not os.path.exists(source_path):
logging.debug('Silently ignoring missing source: %s. This is to avoid '
'errors on platform-specific dependencies.', source_path)
return []
actions = []
if os.path.exists(link_path) or os.path.islink(link_path):
if os.path.islink(link_path):
actions.append(Remove(link_path, dangerous=False))
elif os.path.isfile(link_path):
actions.append(Remove(link_path, dangerous=True))
elif os.path.isdir(link_path):
actions.append(Rmtree(link_path))
else:
raise LinkError('Don\'t know how to plan: %s' % link_path)
# Create parent directories to the target link if needed.
target_parent_dirs = os.path.dirname(link_path)
if (target_parent_dirs and
target_parent_dirs != link_path and
not os.path.exists(target_parent_dirs)):
actions.append(Makedirs(target_parent_dirs))
actions.append(Symlink(source_path, link_path))
return actions
def _initialize_database(filename):
links_database = shelve.open(filename)
# Wipe the database if this version of the script ends up looking at a
# newer (future) version of the links db, just to be sure.
version = links_database.get('SCHEMA_VERSION')
if version and version != SCHEMA_VERSION:
logging.info('Found database with schema version %s while this script only '
'supports %s. Wiping previous database contents.', version,
SCHEMA_VERSION)
links_database.clear()
links_database['SCHEMA_VERSION'] = SCHEMA_VERSION
return links_database
def main():
on_bot = os.environ.get('CHROME_HEADLESS') == '1'
parser = optparse.OptionParser()
parser.add_option('-d', '--dry-run', action='store_true', default=False,
help='Print what would be done, but don\'t perform any '
'operations. This will automatically set logging to '
'verbose.')
parser.add_option('-c', '--clean-only', action='store_true', default=False,
help='Only clean previously created links, don\'t create '
'new ones. This will automatically set logging to '
'verbose.')
parser.add_option('-f', '--force', action='store_true', default=on_bot,
help='Force link creation. CAUTION: This deletes existing '
'folders and files in the locations where links are '
'about to be created.')
parser.add_option('-n', '--no-prompt', action='store_false', dest='prompt',
default=(not on_bot),
help='Prompt if we\'re planning to do a dangerous action')
parser.add_option('-v', '--verbose', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='Print verbose output for debugging.')
options, _ = parser.parse_args()
if options.dry_run or options.force or options.clean_only:
options.verbose = logging.DEBUG
logging.basicConfig(format='%(message)s', level=options.verbose)
# Work from the root directory of the checkout.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
if sys.platform.startswith('win'):
def is_admin():
try:
return os.getuid() == 0
except AttributeError:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
if not is_admin():
logging.error('On Windows, you now need to have administrator '
'privileges for the shell running %s (or '
'`gclient sync|runhooks`).\nPlease start another command '
'prompt as Administrator and try again.', sys.argv[0])
return 1
if not os.path.exists(CHROMIUM_CHECKOUT):
logging.error('Cannot find a Chromium checkout at %s. Did you run "gclient '
'sync" before running this script?', CHROMIUM_CHECKOUT)
return 2
links_database = _initialize_database(LINKS_DB)
try:
symlink_creator = WebRTCLinkSetup(links_database, options.force,
options.dry_run, options.prompt)
symlink_creator.CleanupLinks()
if not options.clean_only:
symlink_creator.CreateLinks(on_bot)
except LinkError as e:
print >> sys.stderr, e.message
return 3
finally:
links_database.close()
return 0
if __name__ == '__main__':
sys.exit(main())
|
lgpl-3.0
| -4,101,265,656,489,156,600
| 32.897436
| 80
| 0.636216
| false
| 3.765557
| false
| false
| false
|
ProjectBabbler/ebird-api
|
src/ebird/api/statistics.py
|
1
|
2558
|
"""Functions for fetching basic statistics about observers and observations."""
from ebird.api.utils import call
from ebird.api.validation import (
clean_area,
clean_date,
clean_max_observers,
clean_rank,
clean_region,
)
TOP_100_URL = "https://ebird.org/ws2.0/product/top100/%s/%s"
TOTALS_URL = "https://ebird.org/ws2.0/product/stats/%s/%s"
def get_top_100(token, region, date, rank="spp", max_results=100):
"""
Get the observers who have seen the most species or submitted the
greatest number of checklists on a given date.
    This maps to the endpoint in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#2d8d3f94-c4b0-42bd-9c8e-71edfa6347ba
:param token: the token needed to access the API.
:param region: the code for the region, eg. US-NV.
:param date: the date, since Jan 1st 1800.
:param rank: order results by species seen (spp) or checklists submitted (cl).
:param max_results: the maximum number of entries to return from
1 to 100. The default value is 100.
:return: the list of observers.
:raises ValueError: if any of the arguments fail the validation checks.
    :raises URLError: if there is an error with the connection to the
        eBird site.
    :raises HTTPError: if the eBird API returns an error.
"""
url = TOP_100_URL % (clean_region(region), date.strftime("%Y/%m/%d"))
params = {
"maxObservers": clean_max_observers(max_results),
"rankedBy": clean_rank(rank),
}
headers = {
"X-eBirdApiToken": token,
}
return call(url, params, headers)
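# Example call (the token and region below are placeholders; `date` is a
# datetime.date, formatted into the URL as YYYY/MM/DD):
#
#   from datetime import date
#   observers = get_top_100("my-api-token", "US-NV", date(2020, 5, 9),
#                           rank="cl", max_results=10)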
def get_totals(token, area, date):
"""
Get the number of contributors, checklists submitted and species seen
on a given date.
    This maps to the endpoint in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#4416a7cc-623b-4340-ab01-80c599ede73e
:param token: the token needed to access the API.
    :param area: the code for a country, subnational1 or subnational2 region,
        or location
:param date: the date, since Jan 1st 1800.
:return: the totals for the given date
:raises ValueError: if any of the arguments fail the validation checks.
    :raises URLError: if there is an error with the connection to the
        eBird site.
    :raises HTTPError: if the eBird API returns an error.
"""
url = TOTALS_URL % (clean_area(area), clean_date(date))
headers = {
"X-eBirdApiToken": token,
}
return call(url, {}, headers)
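# Example call (placeholders again; the area code may be any country,
# subnational or location code accepted by clean_area):
#
#   from datetime import date
#   totals = get_totals("my-api-token", "US-NV", date(2020, 5, 9))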
|
mit
| -4,960,243,422,118,333,000
| 27.422222
| 110
| 0.681392
| false
| 3.397078
| false
| false
| false
|
diggcoin/diggcoin
|
contrib/linearize/linearize-hashes.py
|
1
|
3037
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
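# Example CONFIG-FILE contents (key=value lines; the values shown are
# illustrative and mirror the defaults applied further down):
#
#   host=127.0.0.1
#   port=9886
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#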
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9886
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
|
mit
| -6,657,036,970,384,412,000
| 25.876106
| 90
| 0.663813
| false
| 2.968719
| false
| false
| false
|
openstack/surveil
|
surveil/api/handlers/status/live_host_handler.py
|
1
|
2622
|
# Copyright 2014 - Savoir-Faire Linux inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from surveil.api.datamodel.status import live_host
from surveil.api.handlers.status import mongodb_query
from surveil.api.handlers.status import status_handler
class HostHandler(status_handler.StatusHandler):
"""Fulfills a request on the live hosts."""
def get(self, host_name):
"""Return a host."""
mongo_s = self.request.mongo_connection.alignak_live.hosts.find_one(
{"host_name": host_name}
)
return live_host.LiveHost(**_host_dict_from_mongo_item(mongo_s))
def get_all(self, live_query=None):
"""Return all live hosts."""
host_mappings = {
"last_check": "last_chk",
"description": "display_name",
"plugin_output": "output",
"acknowledged": "problem_has_been_acknowledged"
}
if live_query:
lq = mongodb_query.translate_live_query(live_query.as_dict(),
host_mappings)
else:
lq = {}
query, kwargs = mongodb_query.build_mongodb_query(lq)
mongo_dicts = (self.request.mongo_connection.
alignak_live.hosts.find(*query, **kwargs))
host_dicts = [
_host_dict_from_mongo_item(s) for s in mongo_dicts
]
hosts = []
for host_dict in host_dicts:
host = live_host.LiveHost(**host_dict)
hosts.append(host)
return hosts
def _host_dict_from_mongo_item(mongo_item):
"""Create a dict from a mongodb item."""
mappings = [
('last_chk', 'last_check', int),
('last_state_change', 'last_state_change', int),
('output', 'plugin_output', str),
('problem_has_been_acknowledged', 'acknowledged', bool),
('state', 'state', str),
('display_name', 'description', str),
]
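    # For illustration, a mongo item such as {'last_chk': 1400000000.0, 'output': 'OK'}
    # becomes {'last_check': 1400000000, 'plugin_output': 'OK'} after the loop below.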
for field in mappings:
value = mongo_item.pop(field[0], None)
if value is not None:
mongo_item[field[1]] = field[2](value)
return mongo_item
|
apache-2.0
| -4,852,115,580,305,140,000
| 31.37037
| 76
| 0.606789
| false
| 3.789017
| false
| false
| false
|
patrick91/pycon
|
backend/integrations/tests/test_tasks.py
|
1
|
1179
|
from unittest.mock import MagicMock, patch
from django.test import override_settings
from integrations.tasks import notify_new_submission, switchable_task
def test_notify_new_submission():
with patch("integrations.slack.send_message") as m1:
notify_new_submission(
"test_title",
"test_elevator_pitch",
"test_submission_type",
"test_admin_url",
"test_topic",
42,
)
blocks = m1.call_args[0][0]
attachments = m1.call_args[0][1]
assert blocks[0]["text"]["text"] == "New _test_submission_type_ Submission"
assert (
attachments[0]["blocks"][0]["text"]["text"]
== "*<test_admin_url|Test_title>*\n*"
"Elevator Pitch*\ntest_elevator_pitch"
)
assert attachments[0]["blocks"][0]["fields"][2]["text"] == "42"
assert attachments[0]["blocks"][0]["fields"][3]["text"] == "test_topic"
@override_settings(USE_SCHEDULER=True)
def test_switchable_task():
def dummy_task():
pass
dummy_task.delay = MagicMock()
switchable_dummy_task = switchable_task(dummy_task)
switchable_dummy_task()
assert dummy_task.delay.called
|
mit
| 500,003,179,471,033,660
| 27.756098
| 79
| 0.615776
| false
| 3.52994
| true
| false
| false
|
jpopelka/osbs-client
|
setup.py
|
1
|
1520
|
#!/usr/bin/python
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import re
import sys
from setuptools import setup, find_packages
data_files = {
"share/osbs": [
"inputs/image_stream.json",
"inputs/prod.json",
"inputs/prod_inner.json",
"inputs/simple.json",
"inputs/simple_inner.json",
],
}
def _get_requirements(path):
try:
with open(path) as f:
packages = f.read().splitlines()
except (IOError, OSError) as ex:
raise RuntimeError("Can't open file with requirements: %s", repr(ex))
return [p.strip() for p in packages if not re.match(r"^\s*#", p)]
def _install_requirements():
requirements = _get_requirements('requirements.txt')
if sys.version_info[0] >= 3:
requirements += _get_requirements('requirements-py3.txt')
return requirements
setup(
name="osbs-client",
description='Python module and command line client for OpenShift Build Service',
version="0.16",
author='Red Hat, Inc.',
author_email='atomic-devel@projectatomic.io',
url='https://github.com/projectatomic/osbs-client',
license="BSD",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
entry_points={
'console_scripts': ['osbs=osbs.cli.main:main'],
},
install_requires=_install_requirements(),
data_files=data_files.items(),
)
|
bsd-3-clause
| 2,019,618,046,527,079,000
| 27.679245
| 84
| 0.644737
| false
| 3.627685
| false
| false
| false
|
dethi/duhpy
|
duhpy.py
|
1
|
3987
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import sys
import time
import argparse
from os import getenv
from os.path import expanduser
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
import dropbox
PY2 = (sys.version_info[0] == 2)
if PY2:
input = raw_input
API_KEY = getenv('DUHPY_API_KEY', 'YOUR_API_KEY')
APP_SECRET = getenv('DUHPY_APP_SECRET', 'YOUR_APP_SECRET')
CONFIG_PATH = expanduser('~/.duhpy')
RED = '\033[91m'
NO = '\033[0m'
class APICrawler(object):
def __init__(self, client, nb_threads=10):
self.client = client
self.values = Queue()
self.q = Queue()
for i in range(nb_threads):
worker = Thread(target=self.worker)
worker.daemon = True
worker.start()
def run(self, path='/'):
self.q.put(path)
self.q.join()
total_size = 0
self.values.put('--END--')
for i in iter(self.values.get, '--END--'):
total_size += i
return total_size
def worker(self):
while True:
path = self.q.get()
#print(path)
try:
json = self.client.metadata(path)
                if not is_dir(json):
                    # A plain file: record its size; it has no 'contents' to walk.
                    self.values.put(json['bytes'])
                else:
                    dir_size = 0
                    for item in json['contents']:
                        if is_dir(item):
                            self.q.put(item['path'])
                        else:
                            dir_size += item['bytes']
                    self.values.put(dir_size)
except dropbox.rest.ErrorResponse as e:
if e.status == 429:
#print(RED, '*** Dropbox API rate limit reached ***', NO)
time.sleep(1.5)
self.q.put(path)
self.q.task_done()
def request_token():
if API_KEY == 'YOUR_API_KEY' or APP_SECRET == 'YOUR_APP_SECRET':
print('Please, see the documentation https://github.com/dethi/duhpy')
sys.exit(1)
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(API_KEY, APP_SECRET)
authorize_url = flow.start()
print('1. Go to: ', authorize_url)
print('2. Click "Allow".')
print('3. Copy the authorization code.')
code = input('Enter the authorization code here: ').strip()
try:
access_token, user_id = flow.finish(code)
except:
print('[ERROR] Invalid code')
access_token = None
return access_token
def is_dir(metadata):
if metadata is None:
return False
return metadata["is_dir"]
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return '{:.1f} {}{}'.format(num, unit, suffix)
num /= 1024.0
return '{:.1f} {}{}'.format(num, 'Yi', suffix)
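# For example, sizeof_fmt(1536) returns '1.5 KiB' and sizeof_fmt(3 * 1024**3)
# returns '3.0 GiB'.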
def main():
parser = argparse.ArgumentParser(
prog='duhpy',
description='`du -h` command for Dropbox (online).')
parser.add_argument('path', metavar='PATH', type=str, nargs='+')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
try:
with open(CONFIG_PATH, 'r') as f:
token = f.read()
except IOError:
token = None
while (token is None):
token = request_token()
with open(CONFIG_PATH, 'w') as f:
f.write(token)
client = dropbox.client.DropboxClient(token)
crawler = APICrawler(client)
path_len = min(max(max(map(len, args.path)), 13), 64)
print('{0:^{2}} | {1:^13}'.format('PATH', 'SIZE', path_len))
print('{0:-<{1}}+{0:-<14}'.format('-', path_len + 1))
for path in args.path:
result = crawler.run(path)
print('{0:<{2}.{2}} | {1:>13}'.format(path, sizeof_fmt(result),
path_len))
print()
if __name__ == '__main__':
main()
|
mit
| 1,625,170,860,030,826,800
| 26.308219
| 78
| 0.536494
| false
| 3.559821
| false
| false
| false
|
grave-w-grave/zulip
|
confirmation/models.py
|
2
|
5962
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import re
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from confirmation.util import get_status_field
from zerver.lib.utils import generate_random_token
from zerver.models import PreregistrationUser
from typing import Optional, Union, Any, Dict, Text
try:
import mailer
send_mail = mailer.send_mail
except ImportError:
# no mailer app present, stick with default
pass
B16_RE = re.compile('^[a-f0-9]{40}$')
def check_key_is_valid(creation_key):
# type: (Text) -> bool
if not RealmCreationKey.objects.filter(creation_key=creation_key).exists():
return False
days_sofar = (now() - RealmCreationKey.objects.get(creation_key=creation_key).date_created).days
# Realm creation link expires after settings.REALM_CREATION_LINK_VALIDITY_DAYS
if days_sofar <= settings.REALM_CREATION_LINK_VALIDITY_DAYS:
return True
return False
def generate_key():
# type: () -> Text
return generate_random_token(40)
def generate_activation_url(key, host=None):
# type: (Text, Optional[str]) -> Text
if host is None:
host = settings.EXTERNAL_HOST
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
host,
reverse('confirmation.views.confirm',
kwargs={'confirmation_key': key}))
def generate_realm_creation_url():
# type: () -> Text
key = generate_key()
RealmCreationKey.objects.create(creation_key=key, date_created=now())
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
settings.EXTERNAL_HOST,
reverse('zerver.views.create_realm',
kwargs={'creation_key': key}))
class ConfirmationManager(models.Manager):
def confirm(self, confirmation_key):
# type: (str) -> Union[bool, PreregistrationUser]
if B16_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
obj = confirmation.content_object
status_field = get_status_field(obj._meta.app_label, obj._meta.model_name)
setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
obj.save()
return obj
return False
def get_link_for_object(self, obj, host=None):
# type: (Union[ContentType, int], Optional[str]) -> Text
key = generate_key()
self.create(content_object=obj, date_sent=now(), confirmation_key=key)
return generate_activation_url(key, host=host)
def send_confirmation(self, obj, email_address, additional_context=None,
subject_template_path=None, body_template_path=None,
host=None):
# type: (ContentType, Text, Optional[Dict[str, Any]], Optional[str], Optional[str], Optional[str]) -> Confirmation
confirmation_key = generate_key()
current_site = Site.objects.get_current()
activate_url = generate_activation_url(confirmation_key, host=host)
context = Context({
'activate_url': activate_url,
'current_site': current_site,
'confirmation_key': confirmation_key,
'target': obj,
'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
})
if additional_context is not None:
context.update(additional_context)
if obj.realm is not None and obj.realm.is_zephyr_mirror_realm:
template_name = "mituser"
else:
template_name = obj._meta.model_name
templates = [
'confirmation/%s_confirmation_email_subject.txt' % (template_name,),
'confirmation/confirmation_email_subject.txt',
]
if subject_template_path:
template = loader.get_template(subject_template_path)
else:
template = loader.select_template(templates)
subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
templates = [
'confirmation/%s_confirmation_email_body.txt' % (template_name,),
'confirmation/confirmation_email_body.txt',
]
if body_template_path:
template = loader.get_template(body_template_path)
else:
template = loader.select_template(templates)
body = template.render(context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
return self.create(content_object=obj, date_sent=now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField(_('sent'))
confirmation_key = models.CharField(_('activation key'), max_length=40)
objects = ConfirmationManager()
class Meta(object):
verbose_name = _('confirmation email')
verbose_name_plural = _('confirmation emails')
def __unicode__(self):
# type: () -> Text
return _('confirmation email for %s') % (self.content_object,)
class RealmCreationKey(models.Model):
creation_key = models.CharField(_('activation key'), max_length=40)
date_created = models.DateTimeField(_('created'), default=now)
|
apache-2.0
| 4,702,718,434,550,939,000
| 38.746667
| 122
| 0.643911
| false
| 4.001342
| false
| false
| false
|
bsamseth/project-euler
|
094/94.py
|
1
|
1451
|
"""
It is easily proved that no equilateral triangle exists with integral length
sides and integral area. However, the almost equilateral triangle 5-5-6 has an
area of 12 square units.
We shall define an almost equilateral triangle to be a triangle for which two
sides are equal and the third differs by no more than one unit.
Find the sum of the perimeters of all almost equilateral triangles with
integral side lengths and area and whose perimeters do not exceed one billion
(1,000,000,000).
Solution comment: Instant time.
Area is A = ((a ± 1) * h) / 2, and from Pythagoras
a^2 = ((a ± 1)/2)^2 + h^2
=> ((3a ± 1)/2)^2 - 3h^2 = 1
    => x^2 - 3y^2 = 1 -> Pell equation.
Wolfram Alpha then helpfully supplies integer solutions, with
    x_n = (1/2) * ((2 - √3)**n + (2 + √3)**n)
Only n >= 2 are valid for us. Then get side length from
a = (2x ± 1) / 3,
making sure to pick the sign that makes the division work.
"""
from math import sqrt
upper = int(1e9)
s = 0
sqrt3 = sqrt(3)
n = 2
while True:
x = int(round(0.5 * ((2 - sqrt3)**n + (2 + sqrt3)**n)))
a = 2*x-1
if a >= upper + 1:
break
if a % 3 == 0:
a = a//3
s += 3*a - 1
print(a, a, a-1)
else:
a = (a + 2) // 3
s += 3*a + 1
print(a, a, a+1)
n += 1
print(s)
|
mit
| -7,009,995,585,974,352,000
| 29.702128
| 79
| 0.544006
| false
| 3.130152
| false
| false
| false
|
flopp/airports_map
|
airports/db.py
|
1
|
3270
|
import os
import random
import typing
from airports.airport import Airport, AirportType
from airports.airportstable import AirportsTable
from airports.download import download
from airports.runwaystable import RunwaysTable
from airports.wikipediahelper import get_wikipedia_articles
class DB:
def __init__(self) -> None:
self._airports: typing.Dict[str, Airport] = {}
self._large: typing.List[str] = []
self._medium: typing.List[str] = []
self._small: typing.List[str] = []
self._other: typing.List[str] = []
def load(self, cache_dir: str, reset_cache: bool) -> None:
airports_csv = os.path.join(cache_dir, "airports.csv")
runways_csv = os.path.join(cache_dir, "runways.csv")
wikipedia_json = os.path.join(cache_dir, "wikipedia_json")
if reset_cache:
for file_name in [airports_csv, runways_csv, wikipedia_json]:
if os.path.exists(file_name):
os.remove(file_name)
airports = AirportsTable(download("https://ourairports.com/data/airports.csv", airports_csv))
runways = RunwaysTable(download("https://ourairports.com/data/runways.csv", runways_csv))
articles = get_wikipedia_articles(wikipedia_json)
airports.add_wikipedia(articles)
airports.compute_bounds(runways.to_dict())
airports.check()
for airport in airports.good_airports():
self._airports[airport.icao_code()] = airport
if airport.airport_type() == AirportType.LARGE_AIRPORT:
self._large.append(airport.icao_code())
elif airport.airport_type() == AirportType.MEDIUM_AIRPORT:
self._medium.append(airport.icao_code())
elif airport.airport_type() == AirportType.SMALL_AIRPORT:
self._small.append(airport.icao_code())
else:
self._other.append(airport.icao_code())
def get_all_icaos(self) -> typing.List[str]:
return list(self._airports.keys())
def get(self, icao: str) -> typing.Optional[Airport]:
icao = icao.strip().upper()
if icao in self._airports:
return self._airports[icao]
return None
def get_random(self) -> Airport:
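        # Successive coin flips bias the pick: roughly 50% large airports,
        # 25% medium, 12.5% small, and 12.5% drawn from the full table.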
if random.choice([True, False]):
return self._airports[random.choice(self._large)]
if random.choice([True, False]):
return self._airports[random.choice(self._medium)]
if random.choice([True, False]):
return self._airports[random.choice(self._small)]
return self._airports[random.choice(list(self._airports.keys()))]
def get_random_list(self, count: int) -> typing.List[Airport]:
return random.sample(list(self._airports.values()), count)
def search(self, needle: str) -> typing.Optional[Airport]:
needle = needle.strip().upper()
for airport in self._airports.values():
if airport.matches_code(needle):
return airport
for airport in self._airports.values():
if airport.matches_name(needle):
return airport
for airport in self._airports.values():
if airport.matches_location(needle):
return airport
return None
|
mit
| -1,287,605,393,672,882,000
| 41.467532
| 101
| 0.62263
| false
| 3.637375
| false
| false
| false
|
arcan1s/git-etc
|
sources/ctrlconf/aboutwin.py
|
1
|
5647
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'aboutwin.ui'
#
# Created: Mon Feb 18 04:26:37 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_AboutWindow(object):
def setupUi(self, AboutWindow):
AboutWindow.setObjectName(_fromUtf8("AboutWindow"))
AboutWindow.resize(418, 298)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(AboutWindow.sizePolicy().hasHeightForWidth())
AboutWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(AboutWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.button_close = QtGui.QPushButton(self.centralwidget)
self.button_close.setMinimumSize(QtCore.QSize(100, 20))
self.button_close.setMaximumSize(QtCore.QSize(100, 25))
self.button_close.setDefault(True)
self.button_close.setObjectName(_fromUtf8("button_close"))
self.gridLayout.addWidget(self.button_close, 1, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 1)
self.text_about = QtGui.QTextBrowser(self.centralwidget)
self.text_about.setMinimumSize(QtCore.QSize(410, 260))
self.text_about.setObjectName(_fromUtf8("text_about"))
self.gridLayout.addWidget(self.text_about, 0, 0, 1, 3)
AboutWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(AboutWindow)
QtCore.QMetaObject.connectSlotsByName(AboutWindow)
AboutWindow.setTabOrder(self.text_about, self.button_close)
def retranslateUi(self, AboutWindow):
AboutWindow.setWindowTitle(_translate("AboutWindow", "About", None))
self.button_close.setText(_translate("AboutWindow", "Закрыть", None))
self.text_about.setHtml(_translate("AboutWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Droid Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">git2etc 2.0.0</p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Лицензия: GPL</p>\n"
"<p align=\"justify\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">GUI интерфейс к демону git-etc, написанный на python2.7/PyQt4. Позволяет посмотреть список коммитов и изменения в файлах, записанные в коммитах. Также данное приложение позволяет откатить к определенному коммиту все файлы (git reset --hard) или отдельно взятые (git diff && git apply). Дополнительно предусмотрена возможность слияния старых и новых конфигурационных файлов (используются две ветки репозитория - master и experimental).</p>\n"
"<p align=\"justify\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Автор: Евгений Алексеев aka arcanis</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">e-mail: esalexeev@gmail.com</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Jabber: arcanis@jabber.ru</p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">ICQ: 407-398-235</p></body></html>", None))
|
gpl-3.0
| -5,526,175,977,805,946,000
| 67.217949
| 589
| 0.713588
| false
| 2.87311
| false
| false
| false
|
DXCanas/content-curation
|
contentcuration/contentcuration/node_metadata/annotations.py
|
1
|
8076
|
from django.contrib.postgres.aggregates.general import ArrayAgg
from django.contrib.postgres.aggregates.general import BoolOr
from django.db.models import BooleanField
from django.db.models import CharField
from django.db.models import IntegerField
from django.db.models.aggregates import Count
from django.db.models.aggregates import Max
from django.db.models.expressions import Case
from django.db.models.expressions import F
from django.db.models.expressions import Value
from django.db.models.expressions import When
from django.db.models.functions import Coalesce
from le_utils.constants import content_kinds
from le_utils.constants import roles
from contentcuration.db.models.expressions import BooleanComparison
from contentcuration.db.models.expressions import WhenQ
from contentcuration.node_metadata.cte import AssessmentCountCTE
from contentcuration.node_metadata.cte import ResourceSizeCTE
from contentcuration.node_metadata.cte import TreeMetadataCTE
class MetadataAnnotation(object):
cte = None
cte_columns = ()
def get_annotation(self, cte):
"""
:type cte: With|None
"""
raise NotImplementedError("Metadata annotation needs to implement this method")
def build_kind_condition(self, kind_id, value, comparison="="):
return [BooleanComparison(kind_id, comparison, Value(value))]
def build_topic_condition(self, kind_id, comparison="="):
return self.build_kind_condition(kind_id, content_kinds.TOPIC, comparison)
class AncestorAnnotation(MetadataAnnotation):
cte = TreeMetadataCTE
cte_columns = ("lft", "rght", "pk")
def __init__(self, *args, **kwargs):
self.include_self = kwargs.pop("include_self", False)
super(AncestorAnnotation, self).__init__(*args, **kwargs)
def build_ancestor_condition(self, cte):
"""
@see MPTTModel.get_ancestors()
"""
left_op = "<="
right_op = ">="
if not self.include_self:
left_op = "<"
right_op = ">"
return [
BooleanComparison(cte.col.lft, left_op, F("lft")),
BooleanComparison(cte.col.rght, right_op, F("rght")),
]
class AncestorArrayAgg(AncestorAnnotation):
def get_annotation(self, cte):
ancestor_condition = self.build_ancestor_condition(cte)
return ArrayAgg(
Case(
When(condition=WhenQ(*ancestor_condition), then=cte.col.pk),
default=Value(None),
),
output_field=CharField(),
)
class DescendantCount(MetadataAnnotation):
def get_annotation(self, cte=None):
"""
@see MPTTModel.get_descendant_count()
"""
return Max(
Case(
# when selected node is topic, use algorithm to get descendant count
When(
condition=WhenQ(*self.build_topic_condition(F("kind_id"))),
then=(F("rght") - F("lft") - Value(1)) / Value(2),
),
# integer division floors the result in postgres
default=Value(1),
)
)
class DescendantAnnotation(MetadataAnnotation):
cte = TreeMetadataCTE
cte_columns = ("lft", "rght")
def __init__(self, *args, **kwargs):
self.include_self = kwargs.pop("include_self", False)
super(DescendantAnnotation, self).__init__(*args, **kwargs)
def build_descendant_condition(self, cte):
"""
@see MPTTModel.get_descendants()
"""
left_op = ">="
right_op = "<="
if not self.include_self:
left_op = ">"
right_op = "<"
return [
BooleanComparison(cte.col.lft, left_op, F("lft")),
BooleanComparison(cte.col.lft, right_op, F("rght")),
]
class AssessmentCount(DescendantAnnotation):
cte = AssessmentCountCTE
cte_columns = ("content_id", "assessment_count")
def get_annotation(self, cte):
return Coalesce(cte.col.assessment_count, Value(0), output_field=IntegerField())
class ResourceCount(DescendantAnnotation):
cte_columns = ("content_id", "kind_id") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
topic_condition = self.build_topic_condition(cte.col.kind_id, "!=")
topic_condition += self.build_descendant_condition(cte)
return Count(
Case(
# when selected node is not a topic, then count = 1
When(condition=WhenQ(*resource_condition), then=F("content_id")),
# when it is a topic, then count descendants
When(condition=WhenQ(*topic_condition), then=cte.col.content_id),
default=Value(None),
),
distinct=True,
)
class CoachCount(DescendantAnnotation):
cte_columns = ("content_id", "role_visibility") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
topic_condition = self.build_topic_condition(F("kind_id"))
topic_condition += self.build_descendant_condition(cte)
topic_condition += self.build_coach_condition(cte.col.role_visibility)
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
resource_condition += self.build_coach_condition(F("role_visibility"))
return Count(
Case(
# when selected node is a coach topic, then count descendent content_id's
When(condition=WhenQ(*topic_condition), then=cte.col.content_id),
# when selected node is not a topic, count its content_id
When(condition=WhenQ(*resource_condition), then=F("content_id")),
default=Value(None),
),
distinct=True,
)
def build_coach_condition(self, role_visibility):
return [BooleanComparison(role_visibility, "=", Value(roles.COACH))]
class HasChanged(DescendantAnnotation):
cte_columns = ("changed",) + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
whens = [
# when selected node is not a topic, just use its changed status
When(condition=WhenQ(*resource_condition), then=F("changed")),
]
if self.include_self:
# when selected node is a topic and it's changed and including self, then return that
whens.append(When(condition=WhenQ(*[F("changed")]), then=F("changed")))
return Coalesce(
Case(
*whens,
                # fall back to aggregating descendant changed status for an unchanged topic
default=BoolOr(cte.col.changed)
),
Value(False),
output_field=BooleanField(),
)
class SortOrderMax(DescendantAnnotation):
cte_columns = ("parent_id", "sort_order") + DescendantAnnotation.cte_columns
def get_annotation(self, cte):
resource_condition = self.build_topic_condition(F("kind_id"), "!=")
topic_condition = self.build_child_condition(cte)
return Coalesce(
Max(
Case(
# when selected node is not a topic, use its sort_order
When(condition=WhenQ(*resource_condition), then=F("sort_order")),
# when selected node is a topic, then find max of children
When(condition=WhenQ(*topic_condition), then=cte.col.sort_order),
default=Value(None),
)
),
Value(1),
output_field=IntegerField(),
)
def build_child_condition(self, cte):
return [BooleanComparison(cte.col.parent_id, "=", F("id"))]
class ResourceSize(DescendantAnnotation):
cte = ResourceSizeCTE
cte_columns = ("resource_size",)
def get_annotation(self, cte):
return Max(cte.col.resource_size, output_field=IntegerField())
|
mit
| -4,523,287,446,646,386,000
| 33.810345
| 97
| 0.616518
| false
| 3.978325
| false
| false
| false
|
lipis/github-stats
|
main/api/v1/repo.py
|
1
|
1840
|
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
import flask_restful
import flask
from api import helpers
import auth
import model
import util
from main import api_v1
@api_v1.resource('/repo/', endpoint='api.repo.list')
class RepoListAPI(flask_restful.Resource):
def get(self):
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/repo/<string:repo_key>/', endpoint='api.repo')
class RepoAPI(flask_restful.Resource):
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/repo/', endpoint='api.admin.repo.list')
class AdminRepoListAPI(flask_restful.Resource):
@auth.admin_required
def get(self):
repo_keys = util.param('repo_keys', list)
if repo_keys:
repo_db_keys = [ndb.Key(urlsafe=k) for k in repo_keys]
repo_dbs = ndb.get_multi(repo_db_keys)
      return helpers.make_response(repo_dbs, model.Repo.FIELDS)
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/admin/repo/<string:repo_key>/', endpoint='api.admin.repo')
class AdminRepoAPI(flask_restful.Resource):
@auth.admin_required
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
|
mit
| 9,207,477,670,009,611,000
| 31.280702
| 79
| 0.646196
| false
| 3.351548
| false
| false
| false
|
flavoi/diventi
|
diventi/products/migrations/0060_auto_20200901_1950.py
|
1
|
3075
|
# Generated by Django 2.2.13 on 2020-09-01 17:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0059_auto_20200830_1804'),
]
operations = [
migrations.AlterField(
model_name='chapter',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='chapter',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='chapter',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='imagepreview',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productcategory',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productdetail',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title',
field=models.CharField(max_length=80, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title_en',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
migrations.AlterField(
model_name='productformat',
name='title_it',
field=models.CharField(max_length=80, null=True, verbose_name='title'),
),
]
|
apache-2.0
| 258,824,874,770,298,750
| 33.943182
| 83
| 0.555122
| false
| 4.34322
| false
| false
| false
|
erdc/proteus
|
proteus/tests/LS_with_edgeBased_EV/MCorr/thelper_cons_ls.py
|
1
|
3450
|
from __future__ import division
from past.utils import old_div
from proteus import Domain
from proteus import Context
ct=Context.Options([
# General parameters #
("T",0.1,"Final time"),
("nDTout",1,"Number of time steps to archive"),
("refinement",0,"Level of refinement"),
("unstructured",False,"Use unstructured mesh. Set to false for periodic BCs"),
("SSPOrder",2,"SSP method of order 1, 2 or 3") ,
("cfl",0.5,"Target cfl"),
# PARAMETERS FOR NCLS #
("level_set_function" ,1,"0: distance function, 1: saturated distance function"),
("STABILIZATION_TYPE_ncls",1,"0: SUPG, 1: EV, 2: smoothness based indicator"),
("SATURATED_LEVEL_SET",True,"Saturate the distance function or not?"),
("ENTROPY_TYPE_ncls",2,"1: parabolic, 2: logarithmic"),
("COUPEZ",True,"Flag to turn on/off the penalization term in Coupez approach"),
("DO_REDISTANCING",True,"Solve Eikonal type equation after transport?"),
("cE_ncls",1.0,"Entropy viscosity constant"),
# PARAMETERS FOR VOF #
("STABILIZATION_TYPE_vof",1,"0: SUPG, 1: TG, 2: edge based EV, 3: edge based smoothness ind."),
("ENTROPY_TYPE_vof",1,"0: quadratic, 1: logarithmic"),
("FCT",True,"Use Flux Corrected Transport"),
("cE_vof",0.1,"Entropy viscosity constant"),
("cK",1.0,"Artificial compression constant")
],mutable=True)
# OTHER NUMERICAL PARAMETERS FOR NCLS #
epsCoupez=3
redist_tolerance=0.1
epsFactRedistance=0.33 #For signed dist function
lambda_coupez = 1.0 #Strength of redistancing and coupez force
epsFactHeaviside=epsFactDirac=1.5
# number of space dimensions #
nd=2
# MASS CORRECTION #
applyCorrection=True
correctionType = 'cg'
# General parameters #
parallel = False
linearSmoother = None
checkMass=False
runCFL = ct.cfl
# Finite elmenet spaces #
pDegree_ncls=1
pDegree_vof=pDegree_ncls #volume of fluid should match ls for now
useHex=False
useBernstein=False
# Quadrature order #
quad_order = 2*pDegree_ncls+1
# parallel partitioning info #
from proteus import MeshTools
partitioningType = MeshTools.MeshParallelPartitioningTypes.node
# Create mesh #
nn=nnx=(2**ct.refinement)*10+1
nny=nnx
nnz=1
he=old_div(1.0,(nnx-1.0))
box=Domain.RectangularDomain(L=(1.0,1.0),
x=(0.0,0.0),
name="box");
box.writePoly("box")
if ct.unstructured:
domain=Domain.PlanarStraightLineGraphDomain(fileprefix="box")
domain.boundaryTags = box.boundaryTags
bt = domain.boundaryTags
triangleOptions="pAq30Dena%8.8f" % (0.5*he**2,)
else:
domain = box
domain.MeshOptions.nnx = nnx
domain.MeshOptions.nny = nny
domain.MeshOptions.nnz = nnz
domain.MeshOptions.nn = nn
domain.MeshOptions.triangleFlag=0
# REDISTANCING #
redist_Newton=True
onlyVOF=False
# SMOOTHING FACTORS # (eps)
epsFactHeaviside=epsFactDirac=epsFact_vof=1.5
epsFactRedistance=0.33
epsFactDiffusion=10.0
# SHOCK CAPTURING PARAMETERS #
shockCapturingFactor_vof=0.2
shockCapturingFactor_ncls=0.2
shockCapturingFactor_rd=0.9
lag_shockCapturing_vof=True
lag_shockCapturing_ls=True
lag_shockCapturing_rd=False
# use absolute tolerances on al models
atolRedistance = 1.0e-5
atolConservation = 1.0e-9
atolVolumeOfFluid= 1.0e-9
atolLevelSet = 1.0e-9
#controls
linearSolverConvergenceTest = 'r-true' #rits is do a set number of iterations, r-true uses true residual, PETSc default is preconditioned residual
#redist solver
fmmFlag=0
soname="cons_ls_level_"+repr(ct.refinement)
|
mit
| -2,499,542,141,089,966,600
| 29.263158
| 146
| 0.71913
| false
| 2.863071
| false
| false
| false
|
frePPLe/frePPLe
|
freppledb/common/report.py
|
1
|
134827
|
#
# Copyright (C) 2007-2019 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
r"""
This module implements a generic view to present lists and tables.
It provides the following functionality:
- Pagination of the results.
- Ability to filter on fields, using different operators.
- Ability to sort on a field.
- Export the results as a CSV file, ready for use in a spreadsheet.
- Import CSV formatted data files.
- Show data summarized by time buckets.
The time buckets and time boundaries can easily be updated.
"""
import codecs
import csv
from datetime import date, datetime, timedelta, time
from decimal import Decimal
import functools
import logging
import math
import operator
import json
import re
from time import timezone, daylight
from io import StringIO, BytesIO
import urllib
from openpyxl import load_workbook, Workbook
from openpyxl.utils import get_column_letter
from openpyxl.cell import WriteOnlyCell
from openpyxl.styles import NamedStyle, PatternFill
from dateutil.parser import parse
from openpyxl.comments import Comment as CellComment
from django.db.models import Model
from django.db.utils import DEFAULT_DB_ALIAS, load_backend
from django.contrib.auth.models import Group
from django.contrib.auth import get_permission_codename
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admin.utils import unquote, quote
from django.core.exceptions import ValidationError
from django.core.management.color import no_style
from django.db import connections, transaction, models
from django.db.models.fields import CharField, AutoField
from django.db.models.fields.related import RelatedField
from django.forms.models import modelform_factory
from django.http import HttpResponse, StreamingHttpResponse, HttpResponseNotFound
from django.http import Http404, HttpResponseNotAllowed, HttpResponseForbidden
from django.shortcuts import render
from django.utils import translation
from django.utils.decorators import method_decorator
from django.utils.encoding import smart_str, force_str
from django.utils.html import escape
from django.utils.translation import gettext as _
from django.utils.formats import get_format
from django.utils.text import capfirst, get_text_list, format_lazy
from django.contrib.admin.models import LogEntry, CHANGE, ADDITION, DELETION
from django.contrib.contenttypes.models import ContentType
from django.views.generic.base import View
from freppledb.boot import getAttributeFields
from freppledb.common.models import (
User,
Comment,
Parameter,
BucketDetail,
Bucket,
HierarchyModel,
)
from freppledb.common.dataload import parseExcelWorksheet, parseCSVdata
logger = logging.getLogger(__name__)
# A list of models with some special, administrative purpose.
# They should be excluded from bulk import, export and erasing actions.
EXCLUDE_FROM_BULK_OPERATIONS = (Group, User, Comment)
separatorpattern = re.compile(r"[\s\-_]+")
def create_connection(alias=DEFAULT_DB_ALIAS):
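    """
    Return a new standalone database connection (DatabaseWrapper) for the given
    alias; the caller is responsible for closing it.
    Illustrative usage (sketch):
        conn = create_connection()  # defaults to DEFAULT_DB_ALIAS
        try:
            with conn.cursor() as cursor:
                cursor.execute("select 1")
        finally:
            conn.close()
    """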
connections.ensure_defaults(alias)
connections.prepare_test_settings(alias)
db = connections.databases[alias]
backend = load_backend(db["ENGINE"])
return backend.DatabaseWrapper(db, alias)
def matchesModelName(name, model):
"""
Returns true if the first argument is a valid name for the model passed as second argument.
The string must match either:
- the model name
- the verbose name
    - the plural verbose name
The comparison is case insensitive and also ignores whitespace, dashes and underscores.
The comparison tries to find a match using the current active language, as well as in English.
"""
checkstring = re.sub(separatorpattern, "", name.lower())
# Try with the localized model names
if checkstring == re.sub(separatorpattern, "", model._meta.model_name.lower()):
return True
elif checkstring == re.sub(separatorpattern, "", model._meta.verbose_name.lower()):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name_plural.lower()
):
return True
else:
# Try with English model names
with translation.override("en"):
if checkstring == re.sub(
separatorpattern, "", model._meta.model_name.lower()
):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name.lower()
):
return True
elif checkstring == re.sub(
separatorpattern, "", model._meta.verbose_name_plural.lower()
):
return True
else:
return False
def getHorizon(request, future_only=False):
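    """
    Return a (current date, report start date, report end date) tuple describing
    the reporting horizon of the request.
    The horizon settings are read from the request parameters, falling back to the
    user's settings: either relative to the current date (when horizontype is set)
    or as absolute start and end dates. When future_only is True the start date is
    never moved before the current date.
    """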
# Pick up the current date
try:
current = parse(
Parameter.objects.using(request.database).get(name="currentdate").value
)
except Exception:
current = datetime.now()
current = current.replace(microsecond=0)
horizontype = request.GET.get("horizontype", request.user.horizontype)
horizonunit = request.GET.get("horizonunit", request.user.horizonunit)
try:
horizonlength = int(request.GET.get("horizonlength"))
except Exception:
horizonlength = request.user.horizonlength
if horizontype:
# First type: Horizon relative to the current date
start = current.replace(hour=0, minute=0, second=0, microsecond=0)
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
end = end.replace(hour=0, minute=0, second=0)
elif horizonunit == "week":
end = start.replace(hour=0, minute=0, second=0) + timedelta(
weeks=horizonlength or 8, days=7 - start.weekday()
)
else:
y = start.year
m = start.month + (horizonlength or 2) + (start.day > 1 and 1 or 0)
while m > 12:
y += 1
m -= 12
end = datetime(y, m, 1)
else:
# Second type: Absolute start and end dates given
try:
horizonstart = datetime.strptime(
request.GET.get("horizonstart"), "%Y-%m-%d"
)
except Exception:
horizonstart = request.user.horizonstart
try:
horizonend = datetime.strptime(request.GET.get("horizonend"), "%Y-%m-%d")
except Exception:
horizonend = request.user.horizonend
start = horizonstart
if not start or (future_only and start < current):
start = current.replace(hour=0, minute=0, second=0, microsecond=0)
end = horizonend
if end:
if end < start:
if future_only and end < current:
# Special case to assure a minimum number of future buckets
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
elif horizonunit == "week":
end = start + timedelta(weeks=horizonlength or 8)
else:
end = start + timedelta(weeks=horizonlength or 8)
else:
# Swap start and end to assure the start is before the end
tmp = start
start = end
end = tmp
else:
if horizonunit == "day":
end = start + timedelta(days=horizonlength or 60)
elif horizonunit == "week":
end = start + timedelta(weeks=horizonlength or 8)
else:
end = start + timedelta(weeks=horizonlength or 8)
return (current, start, end)
class GridField(object):
"""
Base field for columns in grid views.
"""
def __init__(self, name, **kwargs):
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
if "key" in kwargs:
self.editable = False
if "title" not in kwargs and not self.title:
self.title = self.name and _(self.name) or ""
if not self.name:
self.sortable = False
self.search = False
if "field_name" not in kwargs:
self.field_name = self.name
def __str__(self):
o = [
'"name":"%s","index":"%s","editable":%s,"label":"%s","align":"%s","title":false,"field_name":"%s"'
% (
self.name or "",
self.name or "",
self.editable and "true" or "false",
force_str(self.title).title().replace("'", "\\'"),
self.align,
self.field_name,
)
]
if self.key:
o.append(',"key":true')
if not self.sortable:
o.append(',"sortable":false')
if not self.search:
o.append(',"search":false')
if self.formatter:
o.append(',"formatter":"%s"' % self.formatter)
if self.unformat:
o.append(',"unformat":"%s"' % self.unformat)
if self.searchrules:
o.append(',"searchrules":{%s}' % self.searchrules)
if self.hidden:
o.append(',"alwayshidden":true, "hidden":true')
if self.searchoptions:
o.append(',"searchoptions":%s' % self.searchoptions)
if self.extra:
if callable(self.extra):
o.append(",%s" % force_str(self.extra()))
else:
o.append(",%s" % force_str(self.extra))
return "".join(o)
name = None
field_name = None
formatter = None
width = 100
editable = True
sortable = True
search = True
key = False
unformat = None
title = None
extra = None
align = "center"
searchrules = None
hidden = False # NEVER display this field
initially_hidden = False # Hide the field by default, but allow the user to add it
searchoptions = '{"searchhidden": true}'
class GridFieldDateTime(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d H:i:s","newformat":"Y-m-d H:i:s"}'
searchoptions = (
'{"sopt":["cn","eq","ne","lt","le","gt","ge","win"],"searchhidden": true}'
)
width = 140
class GridFieldTime(GridField):
formatter = "time"
extra = '"formatoptions":{"srcformat":"H:i:s","newformat":"H:i:s"}'
width = 80
class GridFieldDate(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d","newformat":"Y-m-d"}'
searchoptions = (
'{"sopt":["cn","eq","ne","lt","le","gt","ge","win"],"searchhidden":true}'
)
width = 140
class GridFieldInteger(GridField):
formatter = "integer"
extra = '"formatoptions":{"defaultValue": ""}'
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
width = 70
searchrules = '"integer":true'
class GridFieldNumber(GridField):
formatter = "number"
extra = '"formatoptions":{"defaultValue":"","decimalPlaces":"auto"}'
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
width = 70
searchrules = '"number":true'
class GridFieldBool(GridField):
extra = '"formatoptions":{"disabled":false}, "edittype":"checkbox", "editoptions":{"value":"True:False"}'
width = 60
class GridFieldLastModified(GridField):
formatter = "date"
extra = '"formatoptions":{"srcformat":"Y-m-d H:i:s","newformat":"Y-m-d H:i:s"}'
searchoptions = '{"sopt":["cn","em","nm","in","ni","eq","bw","ew","bn","nc","en","win"],"searchhidden":true}'
title = _("last modified")
editable = False
width = 140
class GridFieldJSON(GridField):
width = 200
align = "left"
searchoptions = '{"sopt":["cn","nc"],"searchhidden":true}'
class GridFieldLocalDateTime(GridFieldDateTime):
pass
class GridFieldText(GridField):
width = 200
align = "left"
searchoptions = '{"sopt":["cn","nc","eq","ne","lt","le","gt","ge","bw","bn","in","ni","ew","en"],"searchhidden":true}'
class GridFieldChoice(GridField):
width = 100
align = "center"
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
e = ['"formatter":"select", "edittype":"select", "editoptions":{"value":"']
first = True
self.choices = kwargs.get("choices", [])
for i in self.choices:
if first:
first = False
e.append("%s:" % i[0])
else:
e.append(";%s:" % i[0])
e.append(i[1])
e.append('"}')
self.extra = format_lazy("{}" * len(e), *e)
def validateValues(self, data):
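        """
        Filter a comma-separated string down to values that are valid choices.
        Matching is case-insensitive on both the stored value and its label.
        Illustrative example: with choices (("open", "Open"), ("closed", "Closed"))
        the input "Open,CLOSED,foo" is reduced to "open,closed".
        """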
result = []
for f in data.split(","):
for c in self.choices:
if f.lower() in (c[0].lower(), force_str(c[1]).lower()):
result.append(c[0])
return ",".join(result)
class GridFieldBoolNullable(GridFieldChoice):
width = 60
def __init__(self, name, **kwargs):
kwargs["choices"] = (("", ""), ("False", _("No")), ("True", _("Yes")))
super().__init__(name, **kwargs)
def getCurrency():
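    """
    Return a (prefix, suffix) tuple for formatting monetary values, based on the
    "currency" parameter. Illustrative examples: a parameter value "EUR" gives
    ("", " EUR"), a value "$,USD" gives ("$ ", " USD"), and a missing or invalid
    parameter falls back to ("", " $").
    """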
try:
cur = Parameter.getValue("currency").split(",")
if len(cur) < 2:
return ("", " %s" % escape(cur[0]))
else:
return ("%s " % escape(cur[0]), " %s" % escape(cur[1]))
except Exception:
return ("", " $")
class GridFieldCurrency(GridField):
formatter = "currency"
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
def extra(self):
cur = getCurrency()
return '"formatoptions":%s' % json.dumps(
{"prefix": cur[0], "suffix": cur[1], "defaultvalue": ""}
)
width = 80
class GridFieldDuration(GridField):
formatter = "duration"
width = 80
searchoptions = (
'{sopt:["eq","ne","in","ni","lt","le","gt","ge"],"searchhidden":true}'
)
class EncodedCSVReader:
"""
A CSV reader which will iterate over lines in the CSV data buffer.
The reader will scan the BOM header in the data to detect the right encoding.
"""
def __init__(self, datafile, **kwds):
# Read the file into memory
# TODO Huge file uploads can overwhelm your system!
data = datafile.read()
# Detect the encoding of the data by scanning the BOM.
# Skip the BOM header if it is found.
if data.startswith(codecs.BOM_UTF32_BE):
self.reader = StringIO(data.decode("utf_32_be"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF32_LE):
self.reader = StringIO(data.decode("utf_32_le"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF16_BE):
self.reader = StringIO(data.decode("utf_16_be"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF16_LE):
self.reader = StringIO(data.decode("utf_16_le"))
self.reader.read(1)
elif data.startswith(codecs.BOM_UTF8):
self.reader = StringIO(data.decode("utf_8"))
self.reader.read(1)
else:
# No BOM header found. We assume the data is encoded in the default CSV character set.
self.reader = StringIO(data.decode(settings.CSV_CHARSET))
self.csvreader = csv.reader(self.reader, **kwds)
def __next__(self):
return next(self.csvreader)
def __iter__(self):
return self
class GridReport(View):
"""
The base class for all jqgrid views.
The parameter values defined here are used as defaults for all reports, but
can be overwritten.
"""
# Points to template to be used
template = "admin/base_site_grid.html"
# The title of the report. Used for the window title
title = ""
    # An optional text shown after the title in the content.
    # It is however not added to the page title or the breadcrumb name
post_title = ""
# Link to the documentation
help_url = None
# The resultset that returns a list of entities that are to be
# included in the report.
# This query is used to return the number of records.
# It is also used to generate the actual results, in case no method
# "query" is provided on the class.
basequeryset = None
# Specifies which column is used for an initial ordering
default_sort = (0, "asc")
# A model class from which we can inherit information.
model = None
# Allow editing in this report or not
editable = True
# Allow filtering of the results or not
filterable = True
# Include time bucket support in the report
hasTimeBuckets = False
    # Allow excluding time buckets in the past
showOnlyFutureTimeBuckets = False
# Default logic: if there is an argument to the report, we always show table + graph
# New logic: if there is an argument, we can still choose whether or not to use table and/or graph
    # Not very clean, but doing otherwise is backward incompatible and would require changing quite a few templates :-(
new_arg_logic = False
# Allow this report to automatically restore the previous filter
# (unless a filter is already applied explicitly in the URL of course)
autofilter = True
    # Restrict the range of time bucket levels available in the report.
    # Buckets with a level above maxBucketLevel (ie more granular) or below
    # minBucketLevel can then not be selected.
maxBucketLevel = None
minBucketLevel = None
# Show a select box in front to allow selection of records
multiselect = True
# Control the height of the grid. By default the full browser window is used.
height = None
# Number of columns frozen in the report
frozenColumns = 0
# A list with required user permissions to view the report
permissions = ()
# Defines the difference between height of the grid and its boundaries
heightmargin = 75
# Define a list of actions
actions = None
_attributes_added = False
@classmethod
def getKey(cls, request, *args, **kwargs):
return "%s.%s" % (cls.__module__, cls.__name__)
@classmethod
def _localize(cls, value, decimal_separator):
"""
Localize numbers.
Dates are always represented as YYYY-MM-DD hh:mm:ss since this is
a format that is understood uniformly across different regions in the
world.
"""
if callable(value):
value = value()
if isinstance(value, numericTypes):
return (
decimal_separator == "," and str(value).replace(".", ",") or str(value)
)
elif isinstance(value, timedelta):
return _parseSeconds(value)
elif isinstance(value, (list, tuple)):
return "|".join([str(cls._localize(i, decimal_separator)) for i in value])
else:
return value
@staticmethod
def getBOM(encoding):
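        """
        Return the byte order mark for the given encoding name (eg "utf-8"), or an
        empty string when the encoding is unknown or has no BOM.
        """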
try:
# Get the official name of the encoding (since encodings can have many alias names)
name = codecs.lookup(encoding).name
except Exception:
return "" # Unknown encoding, without BOM header
if name == "utf-32-be":
return codecs.BOM_UTF32_BE
elif name == "utf-32-le":
return codecs.BOM_UTF32_LE
elif name == "utf-16-be":
return codecs.BOM_UTF16_BE
elif name == "utf-16-le":
return codecs.BOM_UTF16_LE
elif name == "utf-8":
return codecs.BOM_UTF8
else:
return ""
@classmethod
def getAppLabel(cls):
"""
Return the name of the Django application which defines this report.
"""
if hasattr(cls, "app_label"):
return cls.app_label
s = cls.__module__.split(".")
for i in range(len(s), 0, -1):
x = ".".join(s[0:i])
if x in settings.INSTALLED_APPS:
cls.app_label = s[i - 1]
return cls.app_label
raise Exception("Can't identify app of reportclass %s" % cls)
# Extra variables added to the report template
@classmethod
def extra_context(cls, request, *args, **kwargs):
return {}
@staticmethod
def _getJSONValue(data, field=None, request=None):
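        """
        Render a single field value as a JSON literal for the grid data: datetimes
        of local-time fields are shifted to the user's timezone, timedeltas are
        expressed in seconds, and None becomes an empty string.
        """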
if isinstance(data, str) or isinstance(data, (list, tuple)):
return json.dumps(data)
elif isinstance(data, timedelta):
return data.total_seconds()
elif data is None:
return '""'
elif (
isinstance(data, datetime)
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
and request
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
return '"%s"' % (data + request.tzoffset)
else:
return '"%s"' % data
@classmethod
def _getCSVValue(cls, data, field=None, request=None, decimal_separator=""):
if data is None:
return ""
else:
if (
isinstance(data, datetime)
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
and request
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
data += request.tzoffset
return force_str(
cls._localize(data, decimal_separator),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
@classmethod
def getBuckets(cls, request, *args, **kwargs):
"""
        This function picks up the bucketization to use for the report, and stores
        the following on the request object:
        - the start date of the report horizon
        - the end date of the reporting horizon
        - a list of buckets.
        The function takes into consideration some special flags:
- showOnlyFutureTimeBuckets: filter to allow only future time buckets to be shown
- maxBucketLevel: respect the lowest supported level in the time bucket hierarchy
- minBucketLevel: respect the highest supported level in the time bucket hierarchy
"""
# Select the bucket size
if not cls.maxBucketLevel:
maxlvl = 999
elif callable(cls.maxBucketLevel):
maxlvl = cls.maxBucketLevel(request)
else:
maxlvl = cls.maxBucketLevel
if not cls.minBucketLevel:
minlvl = -999
elif callable(cls.minBucketLevel):
minlvl = cls.minBucketLevel(request)
else:
minlvl = cls.minBucketLevel
arg_buckets = request.GET.get("buckets", None)
try:
bucket = (
Bucket.objects.using(request.database)
.get(
name=arg_buckets or request.user.horizonbuckets,
level__lte=maxlvl,
level__gte=minlvl,
)
.name
)
except Exception:
try:
bucket = (
Bucket.objects.using(request.database)
.filter(level__lte=maxlvl, level__gte=minlvl)
.order_by("-level")[0]
.name
)
except Exception:
bucket = None
if not arg_buckets and not request.user.horizonbuckets and bucket:
request.user.horizonbuckets = bucket
request.user.save()
# Get the report horizon
current, start, end = getHorizon(
request, future_only=cls.showOnlyFutureTimeBuckets
)
# Filter based on the start and end date
request.current_date = str(current)
request.report_startdate = start
request.report_enddate = end
request.report_bucket = str(bucket)
if bucket:
res = BucketDetail.objects.using(request.database).filter(bucket=bucket)
if start:
res = res.filter(enddate__gt=start)
if end:
res = res.filter(startdate__lt=end)
request.report_bucketlist = res.values("name", "startdate", "enddate")
else:
request.report_bucketlist = []
@staticmethod
def getTimezoneOffset(request):
"""
Return the difference between the end user's UTC offset and the server's UTC offset
"""
return timedelta(
seconds=timezone - int(request.COOKIES.get("tzoffset", 0)) - daylight * 3600
)
@classmethod
def has_permission(cls, user):
for perm in cls.permissions:
if not user.has_perm("auth.%s" % perm[0]):
return False
if cls.model:
return user.has_perm(
"%s.view_%s" % (cls.model._meta.app_label, cls.model._meta.model_name)
)
return True
@method_decorator(staff_member_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
# Verify the user is authorized to view the report
if not self.has_permission(request.user):
return HttpResponseForbidden("<h1>%s</h1>" % _("Permission denied"))
# Unescape special characters in the arguments.
# All arguments are encoded with escaping function used on the django admin.
args_unquoted = [unquote(i) for i in args]
# Add attributes if not done already
if hasattr(self.__class__, "initialize"):
self.__class__.initialize(request)
if not self._attributes_added and self.model:
self.__class__._attributes_added = True
for f in getAttributeFields(self.model):
self.__class__.rows += (f,)
# Set row and cross attributes on the request
if hasattr(self, "rows"):
if callable(self.rows):
request.rows = self.rows(request, *args, **kwargs)
else:
request.rows = self.rows
if hasattr(self, "crosses"):
if callable(self.crosses):
request.crosses = self.crosses(request, *args, **kwargs)
else:
request.crosses = self.crosses
# Dispatch to the correct method
if request.method == "GET":
return self.get(request, *args_unquoted, **kwargs)
elif request.method == "POST":
return self.post(request, *args_unquoted, **kwargs)
else:
return HttpResponseNotAllowed(["get", "post"])
@classmethod
def _validate_rows(cls, request, prefs):
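        """
        Normalize the user's row preferences into a list of (row index, hidden flag,
        width) tuples. Preferences can reference rows by index or by name; rows not
        mentioned in the preferences are appended, and unknown fields are ignored.
        """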
if not prefs:
return [
(
i,
request.rows[i].hidden or request.rows[i].initially_hidden,
request.rows[i].width,
)
for i in range(len(request.rows))
]
else:
# Validate the preferences to 1) map from name to index, 2) assure all rows
# are included, 3) ignore non-existing fields
defaultrows = {request.rows[i].name: i for i in range(len(request.rows))}
rows = []
for r in prefs:
try:
idx = int(r[0])
if idx < len(request.rows):
defaultrows.pop(request.rows[idx].name, None)
rows.append(r)
except (ValueError, IndexError):
if r[0] in defaultrows:
rows.append((defaultrows[r[0]], r[1], r[2]))
defaultrows.pop(r[0], None)
for r, idx in defaultrows.items():
rows.append(
(
idx,
request.rows[idx].hidden or request.rows[idx].initially_hidden,
request.rows[idx].width,
)
)
return rows
@classmethod
def _render_colmodel(
cls, request, is_popup=False, prefs=None, mode="graph", *args, **kwargs
):
if not prefs:
frozencolumns = cls.frozenColumns
rows = [
(i, request.rows[i].initially_hidden, request.rows[i].width)
for i in range(len(request.rows))
]
else:
frozencolumns = prefs.get("frozen", cls.frozenColumns)
rows = cls._validate_rows(request, prefs.get("rows"))
result = []
if is_popup:
result.append(
'{"name":"select","label":gettext("Select"),"width":75,"align":"center","formatter":"selectbutton","sortable":false,"search":false}'
)
count = -1
for (index, hidden, width) in rows:
count += 1
try:
result.append(
'{%s,"width":%s,"counter":%d%s%s%s}'
% (
request.rows[index],
width,
index,
count < frozencolumns and ',"frozen":true' or "",
is_popup and ',"popup":true' or "",
hidden
and not request.rows[index].hidden
and ',"hidden":true'
or "",
)
)
except IndexError:
logger.warning(
"Invalid preference value for %s: %s"
% (cls.getKey(request, *args, **kwargs), prefs)
)
return ",\n".join(result)
@classmethod
def _generate_spreadsheet_data(
cls, request, scenario_list, output, *args, **kwargs
):
# Create a workbook
wb = Workbook(write_only=True)
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
        # Excel limits worksheet names to 31 characters
ws = wb.create_sheet(title=force_str(title)[:31])
# Create a named style for the header row
headerstyle = NamedStyle(name="headerstyle")
headerstyle.fill = PatternFill(fill_type="solid", fgColor="70c4f4")
wb.add_named_style(headerstyle)
readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
wb.add_named_style(readlonlyheaderstyle)
# Choose fields to export and write the title row
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and request.prefs.get("rows", None):
# Customized settings
fields = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1] and not request.rows[f[0]].hidden
]
else:
# Default settings
fields = [
i
for i in request.rows
if i.field_name and not i.hidden and not i.initially_hidden
]
# Write a formatted header row
header = []
comment = None
for f in fields:
cell = WriteOnlyCell(ws, value=force_str(f.title).title())
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if not f.key and f.formatter == "detail" and fname.endswith("__name"):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
header.append(cell)
if len(scenario_list) > 1:
cell = WriteOnlyCell(ws, value=force_str(_("scenario")).title())
cell.style = "readlonlyheaderstyle"
header.insert(0, cell)
ws.append(header)
# Add an auto-filter to the table
ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(len(header))
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
# Loop over all records
for row in cls.data_query(request, *args, fields=fields, **kwargs):
if hasattr(row, "__getitem__"):
r = [
_getCellValue(row[f.field_name], field=f, request=request)
for f in fields
]
else:
r = [
_getCellValue(
getattr(row, f.field_name), field=f, request=request
)
for f in fields
]
if len(scenario_list) > 1:
r.insert(0, scenario)
ws.append(r)
finally:
request.database = original_database
# Write the spreadsheet
wb.save(output)
@classmethod
def _generate_csv_data(cls, request, scenario_list, *args, **kwargs):
sf = StringIO()
decimal_separator = get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True)
if decimal_separator == ",":
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=";")
else:
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Write a Unicode Byte Order Mark header, aka BOM (Excel needs it to open UTF-8 file properly)
yield cls.getBOM(settings.CSV_CHARSET)
# Choose fields to export
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and request.prefs.get("rows", None):
# Customized settings
custrows = cls._validate_rows(request, request.prefs["rows"])
r = [
force_str(
request.rows[f[0]].title,
encoding=settings.CSV_CHARSET,
errors="ignore",
).title()
for f in custrows
if not f[1] and not request.rows[f[0]].hidden
]
if len(scenario_list) > 1:
r.insert(0, _("scenario"))
writer.writerow(r)
fields = [
request.rows[f[0]]
for f in custrows
if not f[1] and not request.rows[f[0]].hidden
]
else:
# Default settings
r = [
force_str(
f.title, encoding=settings.CSV_CHARSET, errors="ignore"
).title()
for f in request.rows
if f.title and not f.hidden and not f.initially_hidden
]
if len(scenario_list) > 1:
r.insert(0, _("scenario"))
writer.writerow(r)
fields = [
i
for i in request.rows
if i.field_name and not i.hidden and not i.initially_hidden
]
# Write a header row
yield sf.getvalue()
# Write the report content
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
for row in cls.data_query(request, *args, fields=fields, **kwargs):
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
# Build the return value, encoding all output
if hasattr(row, "__getitem__"):
r = [
cls._getCSVValue(
row[f.field_name],
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in fields
]
else:
r = [
cls._getCSVValue(
getattr(row, f.field_name),
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in fields
]
if len(scenario_list) > 1:
r.insert(0, scenario)
writer.writerow(r)
# Return string
yield sf.getvalue()
finally:
request.database = original_database
@classmethod
def getSortName(cls, request):
"""
Build a jqgrid sort configuration pair sidx and sord:
For instance:
("fieldname1 asc, fieldname2", "desc")
"""
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
return (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = (
request.prefs.get("sidx", None),
request.prefs.get("sord", "asc"),
)
if sortname[0] and sortname[1]:
return sortname
# 3) Default sort order
if not cls.default_sort:
return ("", "")
elif len(cls.default_sort) >= 6:
return (
"%s %s, %s %s, %s"
% (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
cls.default_sort[3],
request.rows[cls.default_sort[4]].name,
),
cls.default_sort[5],
)
elif len(cls.default_sort) >= 4:
return (
"%s %s, %s"
% (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
),
cls.default_sort[3],
)
elif len(cls.default_sort) >= 2:
return (request.rows[cls.default_sort[0]].name, cls.default_sort[1])
@classmethod
def _apply_sort(cls, request, query):
"""
Applies a sort to the query.
"""
sortname = None
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
sortname = "%s %s" % (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = "%s %s" % (
request.prefs.get("sidx", ""),
request.GET.get("sord", "asc"),
)
if not sortname or sortname == " asc":
# 3) Default sort order
if not cls.default_sort:
return query
elif len(cls.default_sort) > 6:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name),
request.rows[cls.default_sort[2]].field_name
if cls.default_sort[3] == "asc"
else ("-%s" % request.rows[cls.default_sort[2]].field_name),
request.rows[cls.default_sort[4]].field_name
if cls.default_sort[5] == "asc"
else ("-%s" % request.rows[cls.default_sort[4]].field_name),
)
elif len(cls.default_sort) >= 4:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name),
request.rows[cls.default_sort[2]].field_name
if cls.default_sort[3] == "asc"
else ("-%s" % request.rows[cls.default_sort[2]].field_name),
)
elif len(cls.default_sort) >= 2:
return query.order_by(
request.rows[cls.default_sort[0]].field_name
if cls.default_sort[1] == "asc"
else ("-%s" % request.rows[cls.default_sort[0]].field_name)
)
else:
return query
else:
# Validate the field does exist.
# We only validate the first level field, and not the fields
# on related models.
sortargs = []
for s in sortname.split(","):
stripped = s.strip()
if not stripped:
continue
sortfield, direction = stripped.split(" ", 1)
try:
query.order_by(sortfield).query.__str__()
if direction.strip() != "desc":
sortargs.append(sortfield)
else:
sortargs.append("-%s" % sortfield)
except Exception:
for r in request.rows:
if r.name == sortfield:
try:
query.order_by(r.field_name).query.__str__()
if direction.strip() != "desc":
sortargs.append(r.field_name)
else:
sortargs.append("-%s" % r.field_name)
except Exception:
# Can't sort on this field
pass
break
if sortargs:
return query.order_by(*sortargs)
else:
return query
@classmethod
def _apply_sort_index(cls, request):
"""
Build an SQL fragment to sort on: Eg "1 asc, 2 desc"
"""
sortname = None
if request.GET.get("sidx", ""):
# 1) Sorting order specified on the request
sortname = "%s %s" % (request.GET["sidx"], request.GET.get("sord", "asc"))
elif request.prefs:
# 2) Sorting order from the preferences
sortname = "%s %s" % (
request.prefs.get("sidx", ""),
request.prefs.get("sord", "asc"),
)
if not sortname or sortname == " asc":
# 3) Default sort order
if not cls.default_sort:
return "1 asc"
elif len(cls.default_sort) > 6:
return "%s %s, %s %s, %s %s" % (
cls.default_sort[0] + 1,
cls.default_sort[1],
cls.default_sort[2] + 1,
cls.default_sort[3],
cls.default_sort[4] + 1,
cls.default_sort[5],
)
elif len(cls.default_sort) >= 4:
return "%s %s, %s %s" % (
cls.default_sort[0] + 1,
cls.default_sort[1],
cls.default_sort[2] + 1,
cls.default_sort[3],
)
elif len(cls.default_sort) >= 2:
return "%s %s" % (cls.default_sort[0] + 1, cls.default_sort[1])
else:
return "1 asc"
else:
# Validate the field does exist.
# We only validate the first level field, and not the fields
# on related models.
sortargs = []
for s in sortname.split(","):
sortfield, dir = s.strip().split(" ", 1)
idx = 1
has_one = False
for i in request.rows:
if i.name == sortfield:
sortargs.append(
"%s %s" % (idx, "desc" if dir == "desc" else "asc")
)
if idx == 1:
has_one = True
idx += 1
if sortargs:
if not has_one:
sortargs.append("1 asc")
return ", ".join(sortargs)
else:
return "1 asc"
@classmethod
def defaultSortString(cls, request):
if not cls.default_sort:
return " asc"
elif len(cls.default_sort) >= 6:
return "%s %s, %s %s, %s %s" % (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
request.rows[cls.default_sort[2]].name,
cls.default_sort[3],
request.rows[cls.default_sort[4]].name,
cls.default_sort[5],
)
elif len(cls.default_sort) >= 4:
            return "%s %s, %s %s" % (
                request.rows[cls.default_sort[0]].name,
                cls.default_sort[1],
                request.rows[cls.default_sort[2]].name,
                cls.default_sort[3],
            )
elif len(cls.default_sort) >= 2:
return "%s %s" % (
request.rows[cls.default_sort[0]].name,
cls.default_sort[1],
)
else:
return " asc"
@classmethod
def get_sort(cls, request):
try:
if "sidx" in request.GET:
# Special case when views have grouping.
# The group-by column is then added automatically.
column = request.GET["sidx"]
comma = column.find(",")
if comma > 0:
column = column[comma + 2 :]
sort = 1
ok = False
for r in request.rows:
if r.name == column:
ok = True
break
sort += 1
if not ok:
sort = cls.default_sort[0]
else:
sort = cls.default_sort[0]
except Exception:
sort = cls.default_sort[0]
if request.GET.get("sord", None) == "desc" or cls.default_sort[1] == "desc":
return "%s desc" % sort
else:
return "%s asc" % sort
@classmethod
def data_query(cls, request, *args, fields=None, page=None, **kwargs):
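        """
        Build the filtered and sorted queryset of the report and return its data.
        When a page number is given only that page (of request.pagesize records) is
        returned. If the report class defines a "query" method it post-processes the
        queryset, otherwise the values of the requested fields are returned.
        """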
if not fields:
raise Exception("No fields gives")
if not hasattr(request, "query"):
if callable(cls.basequeryset):
request.query = cls.filter_items(
request, cls.basequeryset(request, *args, **kwargs), False
).using(request.database)
else:
request.query = cls.filter_items(request, cls.basequeryset).using(
request.database
)
query = cls._apply_sort(request, request.query)
if page:
cnt = (page - 1) * request.pagesize + 1
if hasattr(cls, "query"):
return cls.query(request, query[cnt - 1 : cnt + request.pagesize])
else:
return query[cnt - 1 : cnt + request.pagesize].values(*fields)
else:
if hasattr(cls, "query"):
return cls.query(request, query)
else:
fields = [i.field_name for i in request.rows if i.field_name]
return query.values(*fields)
@classmethod
def count_query(cls, request, *args, **kwargs):
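        """
        Return the number of records matching the current filters, by running a
        count(*) over the SQL generated for the filtered queryset.
        """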
if not hasattr(request, "query"):
if callable(cls.basequeryset):
request.query = cls.filter_items(
request, cls.basequeryset(request, *args, **kwargs), False
).using(request.database)
else:
request.query = cls.filter_items(request, cls.basequeryset).using(
request.database
)
with connections[request.database].cursor() as cursor:
tmp = request.query.query.get_compiler(request.database).as_sql(
with_col_aliases=False
)
cursor.execute("select count(*) from (" + tmp[0] + ") t_subquery", tmp[1])
return cursor.fetchone()[0]
@classmethod
def _generate_json_data(cls, request, *args, **kwargs):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
recs = cls.count_query(request, *args, **kwargs)
page = "page" in request.GET and int(request.GET["page"]) or 1
total_pages = math.ceil(float(recs) / request.pagesize)
if page > total_pages:
page = total_pages
if page < 1:
page = 1
yield '{"total":%d,\n' % total_pages
yield '"page":%d,\n' % page
yield '"records":%d,\n' % recs
if hasattr(cls, "extraJSON"):
# Hook to insert extra fields to the json
tmp = cls.extraJSON(request)
if tmp:
yield tmp
yield '"rows":[\n'
# GridReport
first = True
fields = [i.field_name for i in request.rows if i.field_name]
for i in cls.data_query(request, *args, fields=fields, page=page, **kwargs):
if first:
r = ["{"]
first = False
else:
r = [",\n{"]
first2 = True
for f in request.rows:
if not f.name:
continue
s = cls._getJSONValue(i[f.field_name], field=f, request=request)
if first2:
r.append('"%s":%s' % (f.name, s))
first2 = False
elif i[f.field_name] is not None:
r.append(', "%s":%s' % (f.name, s))
r.append("}")
yield "".join(r)
yield "\n]}\n"
@classmethod
def post(cls, request, *args, **kwargs):
if len(request.FILES) > 0:
# Note: the detection of the type of uploaded file depends on the
# browser setting the right mime type of the file.
csvcount = 0
xlscount = 0
for filename, file in request.FILES.items():
if (
file.content_type
== "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
):
xlscount += 1
elif filename.endswith(".xls"):
return HttpResponseNotFound(
"""
Files in the old .XLS excel format can't be read.<br>
Please convert them to the new .XLSX format.
"""
)
else:
csvcount += 1
if csvcount == 0:
# Uploading a spreadsheet file
return StreamingHttpResponse(
content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls.parseSpreadsheetUpload(request),
)
elif xlscount == 0:
# Uploading a CSV file
return StreamingHttpResponse(
content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls.parseCSVupload(request),
)
else: # mixed files
return HttpResponseNotFound("Files must have the same type.")
else:
# Saving after inline edits
return cls.parseJSONupload(request)
@classmethod
def _validate_crosses(cls, request, prefs):
cross_idx = []
for i in prefs:
try:
num = int(i)
if num < len(request.crosses) and request.crosses[num][1].get(
"visible", True
):
cross_idx.append(num)
except ValueError:
for j in range(len(request.crosses)):
if request.crosses[j][0] == i and request.crosses[j][1].get(
"visible", True
):
cross_idx.append(j)
return cross_idx
@classmethod
def getScenarios(cls, request, *args, **kwargs):
scenario_permissions = []
if len(request.user.scenarios) > 1:
original_database = request.database
for scenario in request.user.scenarios:
try:
# request.database needs to be changed for has_perm to work properly
request.database = scenario.name
user = User.objects.using(scenario.name).get(
username=request.user.username
)
if cls.has_permission(user):
scenario_permissions.append(
[
scenario.name,
scenario.description
if scenario.description
else scenario.name,
1 if request.database == original_database else 0,
]
)
except Exception:
pass
# reverting to original request database as permissions are checked
request.database = original_database
return scenario_permissions
@classmethod
def get(cls, request, *args, **kwargs):
# Pick up the list of time buckets
if cls.hasTimeBuckets:
cls.getBuckets(request, args, kwargs)
bucketnames = Bucket.objects.using(request.database)
if cls.maxBucketLevel:
if callable(cls.maxBucketLevel):
maxlvl = cls.maxBucketLevel(request)
bucketnames = bucketnames.filter(level__lte=maxlvl)
else:
bucketnames = bucketnames.filter(level__lte=cls.maxBucketLevel)
if cls.minBucketLevel:
if callable(cls.minBucketLevel):
minlvl = cls.minBucketLevel(request)
bucketnames = bucketnames.filter(level__gte=minlvl)
else:
bucketnames = bucketnames.filter(level__gte=cls.minBucketLevel)
bucketnames = bucketnames.order_by("-level").values_list("name", flat=True)
else:
bucketnames = None
fmt = request.GET.get("format", None)
reportkey = cls.getKey(request, *args, **kwargs)
request.prefs = request.user.getPreference(reportkey, database=request.database)
if request.prefs:
kwargs["preferences"] = request.prefs
# scenario_permissions is used to display multiple scenarios in the export dialog
if len(request.user.scenarios) > 1:
scenario_permissions = cls.getScenarios(request, *args, **kwargs)
else:
scenario_permissions = []
if not fmt:
# Return HTML page
if not hasattr(request, "crosses"):
cross_idx = None
cross_list = None
elif request.prefs and "crosses" in request.prefs:
cross_idx = str(
cls._validate_crosses(request, request.prefs["crosses"])
)
cross_list = cls._render_cross(request)
else:
cross_idx = str(
[
i
for i in range(len(request.crosses))
if request.crosses[i][1].get("visible", True)
and not request.crosses[i][1].get("initially_hidden", False)
]
)
cross_list = cls._render_cross(request)
if args and not cls.new_arg_logic:
mode = "table"
else:
mode = request.GET.get("mode", None)
if mode:
# Store the mode passed in the URL on the session to remember for the next report
request.session["mode"] = mode
else:
# Pick up the mode from the session
mode = request.session.get("mode", "graph")
is_popup = "_popup" in request.GET
sidx, sord = cls.getSortName(request)
autofilter = "noautofilter" not in request.GET and cls.autofilter
filters = cls.getQueryString(request)
if not filters and request.prefs and autofilter:
# Inherit the filter settings from the preferences
filters = request.prefs.get("filter", None)
if request.prefs and autofilter:
page = request.prefs.get("page", 1)
else:
page = 1
context = {
"reportclass": cls,
"title": _("%(title)s for %(entity)s")
% {"title": force_str(cls.title), "entity": force_str(args[0])}
if args and args[0]
else cls.title,
"post_title": cls.post_title,
"preferences": request.prefs,
"reportkey": reportkey,
"colmodel": cls._render_colmodel(
request, is_popup, request.prefs, mode, *args, **kwargs
),
"cross_idx": cross_idx,
"cross_list": cross_list,
"object_id": args and quote(args[0]) or None,
"page": page,
"sord": sord,
"sidx": sidx,
"default_sort": cls.defaultSortString(request),
"is_popup": is_popup,
"filters": json.loads(filters) if filters else None,
"args": args,
"bucketnames": bucketnames,
"model": cls.model,
"scenario_permissions": scenario_permissions,
"hasaddperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("add", cls.model._meta),
)
),
"hasdeleteperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("delete", cls.model._meta),
)
),
"haschangeperm": cls.editable
and cls.model
and request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("change", cls.model._meta),
)
),
"active_tab": "plan",
"mode": mode,
"actions": cls.actions,
}
for k, v in cls.extra_context(request, *args, **kwargs).items():
context[k] = v
return render(request, cls.template, context)
elif fmt == "json":
# Return JSON data to fill the grid.
response = StreamingHttpResponse(
content_type="application/json; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls._generate_json_data(request, *args, **kwargs),
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("spreadsheetlist", "spreadsheettable", "spreadsheet"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return an excel spreadsheet
output = BytesIO()
cls._generate_spreadsheet_data(
request, scenario_list, output, *args, **kwargs
)
response = HttpResponse(
content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
content=output.getvalue(),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response["Content-Disposition"] = (
"attachment; filename*=utf-8''%s.xlsx"
% urllib.parse.quote(force_str(title))
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("csvlist", "csvtable", "csv"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return CSV data to export the data
response = StreamingHttpResponse(
content_type="text/csv; charset=%s" % settings.CSV_CHARSET,
streaming_content=cls._generate_csv_data(
request, scenario_list, *args, **kwargs
),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response["Content-Disposition"] = (
"attachment; filename*=utf-8''%s.csv"
% urllib.parse.quote(force_str(title))
)
response["Cache-Control"] = "no-cache, no-store"
return response
else:
raise Http404("Unknown format type")
@classmethod
def parseJSONupload(cls, request):
# Check permissions
if not cls.model or not cls.editable:
return HttpResponseForbidden(_("Permission denied"))
permname = get_permission_codename("change", cls.model._meta)
if not request.user.has_perm("%s.%s" % (cls.model._meta.app_label, permname)):
return HttpResponseForbidden(_("Permission denied"))
# Loop over the data records
resp = HttpResponse()
ok = True
with transaction.atomic(using=request.database, savepoint=False):
content_type_id = ContentType.objects.get_for_model(cls.model).pk
for rec in json.JSONDecoder().decode(
request.read().decode(request.encoding or settings.DEFAULT_CHARSET)
):
if "delete" in rec:
# Deleting records
for key in rec["delete"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
obj.delete()
LogEntry(
user_id=request.user.id,
content_type_id=content_type_id,
object_id=force_str(key),
object_repr=force_str(key)[:200],
action_flag=DELETION,
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
elif "copy" in rec:
# Copying records
for key in rec["copy"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
if isinstance(cls.model._meta.pk, CharField):
# The primary key is a string
obj.pk = "Copy of %s" % key
elif isinstance(cls.model._meta.pk, AutoField):
# The primary key is an auto-generated number
obj.pk = None
else:
raise Exception(
_("Can't copy %s") % cls.model._meta.app_label
)
obj.save(using=request.database, force_insert=True)
LogEntry(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=obj.pk,
object_repr=force_str(obj),
action_flag=ADDITION,
change_message=_("Copied from %s.") % key,
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
else:
# Editing records
pk = rec["id"]
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(
pk=rec["id"]
)
del rec["id"]
for i in rec:
if (
rec[i] == "\xa0"
): # Workaround for Jqgrid issue: date field can't be set to blank
rec[i] = None
if hasattr(cls.model, "getModelForm"):
UploadForm = cls.model.getModelForm(
tuple(rec.keys()), database=request.database
)
else:
UploadForm = modelform_factory(
cls.model,
fields=tuple(rec.keys()),
formfield_callback=lambda f: (
isinstance(f, RelatedField)
and f.formfield(using=request.database)
)
or f.formfield(),
)
form = UploadForm(rec, instance=obj)
if form.has_changed():
obj = form.save(commit=False)
obj.save(using=request.database)
LogEntry(
user_id=request.user.pk,
content_type_id=content_type_id,
object_id=obj.pk,
object_repr=force_str(obj),
action_flag=CHANGE,
change_message=_("Changed %s.")
% get_text_list(form.changed_data, _("and")),
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % pk)))
resp.write("<br>")
except (ValidationError, ValueError):
transaction.savepoint_rollback(sid)
ok = False
for error in form.non_field_errors():
resp.write(escape("%s: %s" % (pk, error)))
resp.write("<br>")
for field in form:
for error in field.errors:
resp.write(
escape(
"%s %s: %s: %s"
% (obj.pk, field.name, rec[field.name], error)
)
)
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
if ok:
resp.write("OK")
resp.status_code = ok and 200 or 500
return resp
@staticmethod
def dependent_models(m, found):
""" An auxilary method that constructs a set of all dependent models"""
for f in m._meta.get_fields():
if (
f.is_relation
and f.auto_created
and f.related_model != m
and f.related_model not in found
):
for sub in f.related_model.__subclasses__():
# if sub not in found:
found.update([sub])
found.update([f.related_model])
GridReport.dependent_models(f.related_model, found)
@staticmethod
def sort_models(models):
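        """
        Order the list of models such that a model comes after the models it depends on.
        Each entry is expected to be a tuple with the entry name as first element, the
        model class as second element, and as fourth element a set of models that
        depend on it (ie that need to come later in the list).
        """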
# Inject additional dependencies that are not reflected in database constraints
for m in models:
for e in getattr(m[1], "extra_dependencies", []):
for m2 in models:
if m2[1] == e:
m2[3].update([m[1]])
# Sort the list of models, based on dependencies between models
models.sort(key=lambda m: (m[1].__name__, m[0].upper()))
cnt = len(models)
ok = False
while not ok:
ok = True
for i in range(cnt):
j = i + 1
while j < cnt and ok:
if models[i][1] != models[j][1] and models[i][1] in models[j][3]:
i_base = models[i][1].__base__
if i_base == Model or i_base._meta.abstract:
i_base = None
j_base = models[j][1].__base__
if j_base == Model or j_base._meta.abstract:
j_base = None
if i_base == j_base and i_base and j_base:
j += 1
continue
if i_base == models[j][1] or j_base == models[i][1]:
j += 1
continue
models.append(models.pop(i))
while j < cnt:
if models[i][1] == models[j][1]:
models.append(models.pop(j))
j += 1
ok = False
break
elif (
models[i][1] == models[j][1]
and models[i][0].upper() > models[j][0].upper()
):
models[i], models[j] = models[j], models[i]
ok = False
j += 1
return models
@classmethod
def erase(cls, request):
# Build a list of dependencies
deps = set([cls.model])
# Special case for MO/PO/DO/DLVR that cannot be truncated
if cls.model.__name__ not in (
"PurchaseOrder",
"ManufacturingOrder",
"DistributionOrder",
"DeliveryOrder",
):
GridReport.dependent_models(cls.model, deps)
# Check the delete permissions for all related objects
for m in deps:
permname = get_permission_codename("delete", m._meta)
if not request.user.has_perm("%s.%s" % (m._meta.app_label, permname)):
return format_lazy(
"{}:{}", m._meta.verbose_name, _("Permission denied")
)
# Delete the data records
cursor = connections[request.database].cursor()
with transaction.atomic(using=request.database):
sql_list = []
containsOperationPlan = any(m.__name__ == "OperationPlan" for m in deps)
for m in deps:
if "getDeleteStatements" in dir(m) and not containsOperationPlan:
sql_list.extend(m.getDeleteStatements())
else:
sql_list = connections[request.database].ops.sql_flush(
no_style(), [m._meta.db_table for m in deps], []
)
for sql in sql_list:
cursor.execute(sql)
# Erase comments and history
content_ids = [ContentType.objects.get_for_model(m) for m in deps]
LogEntry.objects.filter(content_type__in=content_ids).delete()
Comment.objects.filter(content_type__in=content_ids).delete()
# Prepare message
for m in deps:
messages.add_message(
request,
messages.INFO,
_("Erasing data from %(model)s")
% {"model": force_str(m._meta.verbose_name)},
)
# Finished successfully
return None
@classmethod
def parseCSVupload(cls, request):
"""
This method reads CSV data from a string (in memory) and creates or updates
the database records.
The data must follow the following format:
- the first row contains a header, listing all field names
        - lines starting with the character '#' are treated as comments
- empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right delimiter and language
delimiter = (
get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True) == ","
and ";"
or ","
)
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield format_lazy("<div>{}</div>", returnvalue)
return
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s</th></tr>' % filename
data = EncodedCSVReader(file, delimiter=delimiter)
for error in parseCSVdata(
cls.model,
data,
user=request.user,
database=request.database,
ping=True,
):
if error[0] == logging.DEBUG:
                            # Yield some result so we can detect disconnected clients and interrupt the upload
yield " "
continue
if firsterror and error[0] in (logging.ERROR, logging.WARNING):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                        elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
error[2] if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
except GeneratorExit:
logging.warning("Connection Aborted")
except NameError:
pass
@classmethod
def parseSpreadsheetUpload(cls, request):
"""
This method reads a spreadsheet file (in memory) and creates or updates
the database records.
The data must follow the following format:
        - all worksheets in the spreadsheet are read
- the first row contains a header, listing all field names
        - lines starting with the character '#' are treated as comments
- empty rows are skipped
"""
# Check permissions
if not cls.model:
yield "<div>%s</div>" % _("Invalid upload request")
return
permname = get_permission_codename("add", cls.model._meta)
if not cls.editable or not request.user.has_perm(
"%s.%s" % (cls.model._meta.app_label, permname)
):
yield "<div>%s</div>" % _("Permission denied")
return
# Choose the right language
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
# Handle the complete upload as a single database transaction
try:
with transaction.atomic(using=request.database):
# Erase all records and related tables
if "erase" in request.POST:
returnvalue = cls.erase(request)
if returnvalue:
yield '<br><samp style="padding-left: 15px;">%s</samp><br>' % returnvalue
                        return
# Header in output
yield (
'<div class="table-responsive">'
'<table class="table table-condensed" style="white-space: nowrap"><tbody>'
)
for filename, file in request.FILES.items():
numerrors = 0
numwarnings = 0
firsterror = True
yield '<tr style="text-align: center"><th colspan="5">%s</th></tr>' % filename
# Loop through the data records
wb = load_workbook(filename=file, read_only=True, data_only=True)
numsheets = len(wb.sheetnames)
for ws_name in wb.sheetnames:
rowprefix = "" if numsheets == 1 else "%s " % ws_name
ws = wb[ws_name]
for error in parseExcelWorksheet(
cls.model,
ws,
user=request.user,
database=request.database,
ping=True,
):
if error[0] == logging.DEBUG:
                                # Yield some result so we can detect disconnected clients and interrupt the upload
yield " "
continue
if firsterror and error[0] in (
logging.ERROR,
logging.WARNING,
):
yield '<tr><th class="sr-only">%s</th><th>%s</th><th>%s</th><th>%s</th><th>%s%s%s</th></tr>' % (
capfirst(_("worksheet")),
capfirst(_("row")),
capfirst(_("field")),
capfirst(_("value")),
capfirst(_("error")),
" / ",
capfirst(_("warning")),
)
firsterror = False
if error[0] == logging.ERROR:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("error")),
error[4],
)
numerrors += 1
                            elif error[0] == logging.WARNING:
yield '<tr><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s: %s</td></tr>' % (
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
capfirst(_("warning")),
error[4],
)
numwarnings += 1
else:
yield '<tr class=%s><td class="sr-only">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
"danger" if numerrors > 0 else "success",
cls.model._meta.verbose_name,
error[1] if error[1] else "",
"%s%s" % (rowprefix, error[2]) if error[2] else "",
error[3] if error[3] else "",
error[4],
)
yield "</tbody></table></div>"
except GeneratorExit:
logger.warning("Connection Aborted")
except NameError:
pass
@classmethod
def _getRowByName(cls, request, name):
if not hasattr(cls, "_rowsByName"):
cls._rowsByName = {}
for i in request.rows:
cls._rowsByName[i.name] = i
if i.field_name != i.name:
cls._rowsByName[i.field_name] = i
return cls._rowsByName[name]
@staticmethod
def _filter_ne(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return ~models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bn(query, reportrow, data):
return ~models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_en(query, reportrow, data):
return ~models.Q(
**{"%s__iendswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_nc(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return ~models.Q(
**{"%s__contains" % reportrow.field_name: smart_str(data).strip()}
)
else:
return ~models.Q(
**{"%s__icontains" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_ni(query, reportrow, data):
return ~models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_in(query, reportrow, data):
return models.Q(
**{"%s__in" % reportrow.field_name: smart_str(data).strip().split(",")}
)
@staticmethod
def _filter_eq(query, reportrow, data):
if isinstance(
reportrow, (GridFieldCurrency, GridFieldInteger, GridFieldNumber)
):
return models.Q(
**{"%s__exact" % reportrow.field_name: smart_str(data).strip()}
)
else:
return models.Q(
**{"%s__iexact" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_bw(query, reportrow, data):
return models.Q(
**{"%s__istartswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_gt(query, reportrow, data):
return models.Q(**{"%s__gt" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_gte(query, reportrow, data):
return models.Q(**{"%s__gte" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_lt(query, reportrow, data):
return models.Q(**{"%s__lt" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_lte(query, reportrow, data):
return models.Q(**{"%s__lte" % reportrow.field_name: smart_str(data).strip()})
@staticmethod
def _filter_ew(query, reportrow, data):
return models.Q(
**{"%s__iendswith" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_cn(query, reportrow, data):
return models.Q(
**{"%s__icontains" % reportrow.field_name: smart_str(data).strip()}
)
@staticmethod
def _filter_win(query, reportrow, data):
limit = date.today() + timedelta(int(float(smart_str(data))))
return models.Q(**{"%s__lte" % reportrow.field_name: limit})
_filter_map_jqgrid_django = {
        # jqgrid op: filter function that builds the corresponding django Q object
"ne": _filter_ne.__func__,
"bn": _filter_bn.__func__,
"en": _filter_en.__func__,
"nc": _filter_nc.__func__,
"ni": _filter_ni.__func__,
"in": _filter_in.__func__,
"eq": _filter_eq.__func__,
"bw": _filter_bw.__func__,
"gt": _filter_gt.__func__,
"ge": _filter_gte.__func__,
"lt": _filter_lt.__func__,
"le": _filter_lte.__func__,
"ew": _filter_ew.__func__,
"cn": _filter_cn.__func__,
"win": _filter_win.__func__,
}
_filter_map_django_jqgrid = {
# django lookup: jqgrid op
"in": "in",
"exact": "eq",
"startswith": "bw",
"iexact": "eq",
"istartswith": "bw",
"gt": "gt",
"gte": "ge",
"lt": "lt",
"lte": "le",
"endswith": "ew",
"contains": "cn",
"iendswith": "ew",
"icontains": "cn"
# 'win' exist in jqgrid, but not in django
}
@classmethod
def getQueryString(cls, request):
        # Django-style filtering (which uses URL parameters) is converted to a jqgrid filter expression
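        # For example, with a hypothetical field "name", the URL parameter
        #   ?name__icontains=foo
        # becomes the jqgrid expression
        #   {"groupOp":"AND","rules":[{"field":"name","op":"cn","data":"foo"}]}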
filtered = False
filters = ['{"groupOp":"AND","rules":[']
first = True
for i, j in request.GET.items():
for r in request.rows:
if r.field_name and (
(i == r.field_name) or i.startswith(r.field_name + "__")
):
operator = (i == r.field_name) and "exact" or i[i.rfind("__") + 2 :]
try:
if first:
first = False
else:
filters.append(",")
filters.append(
'{"field":"%s","op":"%s","data":"%s"}'
% (
r.field_name,
cls._filter_map_django_jqgrid[operator],
unquote(j).replace('"', '\\"'),
)
)
filtered = True
except Exception:
pass # Ignore invalid operators
if not filtered:
return None
filters.append("]}")
return "".join(filters)
@classmethod
def _get_q_filter(cls, request, filterdata):
q_filters = []
for rule in filterdata["rules"]:
try:
op, field, data = rule["op"], rule["field"], rule["data"]
reportrow = cls._getRowByName(request, field)
if data == "":
# No filter value specified, which makes the filter invalid
continue
else:
q_filters.append(
cls._filter_map_jqgrid_django[op](
q_filters,
reportrow,
reportrow.validateValues(data)
if isinstance(reportrow, GridFieldChoice)
else data,
)
)
except Exception:
pass # Silently ignore invalid filters
if "groups" in filterdata:
for group in filterdata["groups"]:
try:
z = cls._get_q_filter(request, group)
if z:
q_filters.append(z)
except Exception:
pass # Silently ignore invalid groups
if len(q_filters) == 0:
return None
elif filterdata["groupOp"].upper() == "OR":
return functools.reduce(operator.ior, q_filters)
else:
return functools.reduce(operator.iand, q_filters)
@classmethod
def filter_items(cls, request, items, plus_django_style=True):
# Jqgrid-style advanced filtering
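        # The "filters" GET parameter carries a jqgrid search expression of the form
        # (field name hypothetical):
        #   {"groupOp":"AND",
        #    "rules":[{"field":"name","op":"cn","data":"foo"}],
        #    "groups":[]}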
filters = None
_filters = request.GET.get("filters")
if _filters:
# Validate complex search JSON data
try:
filters = json.loads(_filters)
except ValueError:
filters = None
# Single field searching, which is currently not used
if request.GET.get("_search") == "true" and not filters:
field = request.GET.get("searchField")
op = request.GET.get("searchOper")
data = request.GET.get("searchString")
if all([field, op, data]):
filters = {
"groupOp": "AND",
"rules": [{"op": op, "field": field, "data": data}],
}
if filters:
z = cls._get_q_filter(request, filters)
if z:
return items.filter(z)
else:
return items
# Django-style filtering, using URL parameters
if plus_django_style:
for i, j in request.GET.items():
for r in request.rows:
if r.name and (
i == r.field_name or i.startswith(r.field_name + "__")
):
try:
items = items.filter(
**{
i: r.validateValues(unquote(j))
if isinstance(r, GridFieldChoice)
else unquote(j)
}
)
except Exception:
pass # silently ignore invalid filters
return items
class GridPivot(GridReport):
# Cross definitions.
# Possible attributes for a cross field are:
# - title:
# Name of the cross that is displayed to the user.
# It defaults to the name of the field.
# - editable:
# True when the field is editable in the page.
# The default value is false.
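    # A hypothetical crosses definition could look like:
    #   crosses = (
    #       ("planned", {"title": _("planned quantity"), "editable": True}),
    #       ("confirmed", {"title": _("confirmed quantity")}),
    #   )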
crosses = ()
template = "admin/base_site_gridpivot.html"
hasTimeBuckets = True
editable = False
multiselect = False
@classmethod
def _render_cross(cls, request):
result = []
for i in request.crosses:
m = {"key": i[0]}
for key, value in i[1].items():
if callable(value):
if key == "title":
m["name"] = capfirst(force_str(value(request)))
else:
m[key] = force_str(value(request), strings_only=True)
else:
if key == "title":
m["name"] = capfirst(force_str(value))
else:
m[key] = force_str(value, strings_only=True)
if "editable" not in m:
m["editable"] = False
result.append(json.dumps(m))
return ",\n".join(result)
@classmethod
def _render_colmodel(
cls, request, is_popup=False, prefs=None, mode="graph", *args, **kwargs
):
if prefs and "rows" in prefs:
# Validate the preferences to 1) map from name to index, 2) assure all rows
# are included, 3) ignore non-existing fields
prefrows = prefs["rows"]
defaultrows = {request.rows[i].name: i for i in range(len(request.rows))}
rows = []
for r in prefrows:
try:
idx = int(r[0])
defaultrows.pop(request.rows[idx].name, None)
rows.append(r)
except (ValueError, IndexError):
if r[0] in defaultrows:
rows.append((defaultrows[r[0]], r[1], r[2]))
defaultrows.pop(r[0], None)
for r, idx in defaultrows.items():
rows.append(
(
idx,
request.rows[idx].hidden or request.rows[idx].initially_hidden,
request.rows[idx].width,
)
)
else:
# Default configuration
rows = [
(
i,
request.rows[i].initially_hidden or request.rows[i].hidden,
request.rows[i].width,
)
for i in range(len(request.rows))
]
result = []
if is_popup:
result.append(
'{"name":"select","label":gettext("Select"),"width":75,"align":"center","sortable":false,"search":false,"fixed":true}'
)
count = -1
for (index, hidden, width) in rows:
try:
result.append(
'{%s,"width":%s,"counter":%d,"frozen":true%s,"hidden":%s,"fixed":true}'
% (
request.rows[index],
width,
index,
is_popup and ',"popup":true' or "",
hidden and "true" or "false",
)
)
count += 1
except IndexError:
pass
return ",\n".join(result)
@classmethod
def count_query(cls, request, *args, **kwargs):
if not hasattr(request, "basequery"):
if callable(cls.basequeryset):
request.basequery = cls.basequeryset(request, *args, **kwargs)
else:
request.basequery = cls.basequeryset
if args and args[0] and not cls.new_arg_logic:
request.basequery = request.basequery.filter(pk__exact=args[0])
return (
cls.filter_items(request, request.basequery, False)
.using(request.database)
.count()
)
@classmethod
def data_query(cls, request, *args, page=None, fields=None, **kwargs):
if not fields:
raise Exception("No fields for pivot report")
if not hasattr(request, "basequery"):
if callable(cls.basequeryset):
request.basequery = cls.basequeryset(request, *args, **kwargs)
else:
request.basequery = cls.basequeryset
if args and args[0] and not cls.new_arg_logic:
request.basequery = request.basequery.filter(pk__exact=args[0])
if page:
cnt = (page - 1) * request.pagesize + 1
return cls.query(
request,
cls._apply_sort(
request, cls.filter_items(request, request.basequery, False)
).using(request.database)[cnt - 1 : cnt + request.pagesize],
sortsql=cls._apply_sort_index(request),
)
else:
return cls.query(
request,
cls._apply_sort(
request, cls.filter_items(request, request.basequery, False)
).using(request.database),
sortsql=cls._apply_sort_index(request),
)
@classmethod
def _generate_json_data(cls, request, *args, **kwargs):
# Prepare the query
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
recs = cls.count_query(request, *args, **kwargs)
page = "page" in request.GET and int(request.GET["page"]) or 1
total_pages = math.ceil(float(recs) / request.pagesize)
if page > total_pages:
page = total_pages
if page < 1:
page = 1
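        # The generated document has roughly this shape (values are illustrative;
        # the row fields and bucket keys depend on the report):
        #   {"total":3, "page":1, "records":120, "rows":[
        #     {"name":"item A", "2024-01-01":[10, 2], "2024-02-01":[5, 0]},
        #     ... ]}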
# Generate header of the output
yield '{"total":%d,\n' % total_pages
yield '"page":%d,\n' % page
yield '"records":%d,\n' % recs
yield '"rows":[\n'
# Generate output
currentkey = None
r = []
fields = [i.field_name for i in request.rows if i.field_name]
for i in cls.data_query(request, *args, page=page, fields=fields, **kwargs):
# We use the first field in the output to recognize new rows.
if currentkey != i[request.rows[0].name]:
# New line
if currentkey:
yield "".join(r)
r = ["},\n{"]
else:
r = ["{"]
currentkey = i[request.rows[0].name]
first2 = True
for f in request.rows:
try:
s = cls._getJSONValue(i[f.name], field=f, request=request)
if first2:
r.append('"%s":%s' % (f.name, s))
first2 = False
elif i[f.name] is not None:
r.append(', "%s":%s' % (f.name, s))
except Exception:
pass
r.append(', "%s":[' % i["bucket"])
first2 = True
for f in request.crosses:
if i[f[0]] is None:
if first2:
r.append("null")
first2 = False
else:
r.append(",null")
else:
if first2:
r.append("%s" % i[f[0]])
first2 = False
else:
r.append(",%s" % i[f[0]])
r.append("]")
r.append("}")
r.append("\n]}\n")
yield "".join(r)
@classmethod
def _generate_csv_data(cls, request, scenario_list, *args, **kwargs):
sf = StringIO()
decimal_separator = get_format("DECIMAL_SEPARATOR", request.LANGUAGE_CODE, True)
if decimal_separator == ",":
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=";")
else:
writer = csv.writer(sf, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
if translation.get_language() != request.LANGUAGE_CODE:
translation.activate(request.LANGUAGE_CODE)
listformat = request.GET.get("format", "csvlist") == "csvlist"
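        # In the "csvlist" layout each line holds: the row fields, the bucket name,
        # then one column per cross. In the pivoted layout (any other format value)
        # each line holds: the row fields, the data field name, then one column per
        # time bucket.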
# Write a Unicode Byte Order Mark header, aka BOM (Excel needs it to open UTF-8 file properly)
yield cls.getBOM(settings.CSV_CHARSET)
# Pick up the preferences
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and "rows" in request.prefs:
myrows = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1]
]
else:
myrows = [
f
for f in request.rows
if f.name and not f.hidden and not f.initially_hidden
]
if request.prefs and "crosses" in request.prefs:
mycrosses = [
request.crosses[f]
for f in cls._validate_crosses(request, request.prefs["crosses"])
]
else:
mycrosses = [f for f in request.crosses if f[1].get("visible", True)]
# Write a header row
fields = [
force_str(f.title, encoding=settings.CSV_CHARSET, errors="ignore").title()
for f in myrows
if f.name
]
if listformat:
fields.extend(
[
capfirst(
force_str(
_("bucket"), encoding=settings.CSV_CHARSET, errors="ignore"
)
)
]
)
fields.extend(
[
capfirst(
force_str(
_(
(
f[1]["title"](request)
if callable(f[1]["title"])
else f[1]["title"]
)
if "title" in f[1]
else f[0]
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
)
for f in mycrosses
]
)
else:
fields.extend(
[
capfirst(
force_str(
_("data field"),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
)
]
)
fields.extend(
[
force_str(b["name"], encoding=settings.CSV_CHARSET, errors="ignore")
for b in request.report_bucketlist
]
)
if len(scenario_list) > 1:
fields.insert(0, _("scenario"))
writer.writerow(fields)
yield sf.getvalue()
# Write the report content
        original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
query = cls.data_query(request, *args, fields=fields, **kwargs)
if listformat:
for row in query:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
# Data for rows
if hasattr(row, "__getitem__"):
fields = [
cls._getCSVValue(
row[f.name],
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in myrows
if f.name
]
fields.extend(
[
force_str(
row["bucket"],
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(row[f[0]], decimal_separator),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if row[f[0]] is not None
else ""
for f in mycrosses
]
)
else:
fields = [
cls._getCSVValue(
getattr(row, f.name),
field=f,
request=request,
decimal_separator=decimal_separator,
)
for f in myrows
if f.name
]
fields.extend(
[
force_str(
getattr(row, "bucket"),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
getattr(row, f[0]), decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if getattr(row, f[0]) is not None
else ""
for f in mycrosses
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
else:
currentkey = None
row_of_buckets = None
for row in query:
# We use the first field in the output to recognize new rows.
if not currentkey:
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
elif currentkey == row[request.rows[0].name]:
row_of_buckets.append(row)
else:
# Write an entity
for cross in mycrosses:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
fields = [
cls._getCSVValue(
row_of_buckets[0][s.name],
field=s,
request=request,
decimal_separator=decimal_separator,
)
for s in myrows
if s.name
]
fields.extend(
[
force_str(
capfirst(
_(
(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
if "title" in cross[1]
else cross[0]
)
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
bucket[cross[0]], decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
if bucket[cross[0]] is not None
else ""
for bucket in row_of_buckets
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
# Write the last entity
if row_of_buckets:
for cross in mycrosses:
# Clear the return string buffer
sf.seek(0)
sf.truncate(0)
fields = [
cls._getCSVValue(
row_of_buckets[0][s.name],
field=s,
request=request,
decimal_separator=decimal_separator,
)
for s in myrows
if s.name
]
fields.extend(
[
force_str(
capfirst(
_(
(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
if "title" in cross[1]
else cross[0]
)
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
]
)
fields.extend(
[
force_str(
cls._localize(
bucket[cross[0]], decimal_separator
),
encoding=settings.CSV_CHARSET,
errors="ignore",
)
for bucket in row_of_buckets
]
)
# Return string
if len(scenario_list) > 1:
fields.insert(0, scenario)
writer.writerow(fields)
yield sf.getvalue()
finally:
            request.database = original_database
@classmethod
def _generate_spreadsheet_data(
cls, request, scenario_list, output, *args, **kwargs
):
# Create a workbook
wb = Workbook(write_only=True)
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
        # Excel limits worksheet names to 31 characters
ws = wb.create_sheet(title=force_str(title)[:31])
# Create a named style for the header row
headerstyle = NamedStyle(name="headerstyle")
headerstyle.fill = PatternFill(fill_type="solid", fgColor="70c4f4")
wb.add_named_style(headerstyle)
readlonlyheaderstyle = NamedStyle(name="readlonlyheaderstyle")
readlonlyheaderstyle.fill = PatternFill(fill_type="solid", fgColor="d0ebfb")
wb.add_named_style(readlonlyheaderstyle)
# Pick up the preferences
listformat = request.GET.get("format", "spreadsheetlist") == "spreadsheetlist"
if not hasattr(request, "prefs"):
request.prefs = request.user.getPreference(
cls.getKey(request, *args, **kwargs), database=request.database
)
if request.prefs and "rows" in request.prefs:
myrows = [
request.rows[f[0]]
for f in cls._validate_rows(request, request.prefs["rows"])
if not f[1]
]
else:
myrows = [
f
for f in request.rows
if f.name and not f.initially_hidden and not f.hidden
]
if request.prefs and "crosses" in request.prefs:
mycrosses = [
request.crosses[f]
for f in cls._validate_crosses(request, request.prefs["crosses"])
]
else:
mycrosses = [f for f in request.crosses if f[1].get("visible", True)]
# Write a header row
fields = []
comment = None
for f in myrows:
if f.name:
cell = WriteOnlyCell(ws, value=force_str(f.title).title())
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if (
not f.key
and f.formatter == "detail"
and fname.endswith("__name")
):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
if listformat:
cell = WriteOnlyCell(ws, value=capfirst(force_str(_("bucket"))))
if f.editable or f.key:
cell.style = "headerstyle"
fname = getattr(f, "field_name", f.name)
if not f.key and f.formatter == "detail" and fname.endswith("__name"):
cell.comment = CellComment(
force_str(
_("Values in this field must exist in the %s table")
% force_str(_(fname[:-6]))
),
"Author",
)
elif isinstance(f, GridFieldChoice):
cell.comment = CellComment(
force_str(
_("Accepted values are: %s")
% ", ".join([c[0] for c in f.choices])
),
"Author",
)
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
for f in mycrosses:
cell = WriteOnlyCell(
ws,
value=capfirst(
force_str(
_(
(
f[1]["title"](request)
if callable(f[1]["title"])
else f[1]["title"]
)
if "title" in f[1]
else f[0]
)
)
),
)
if f[1].get("editable", False):
cell.style = "headerstyle"
else:
cell.style = "readlonlyheaderstyle"
if not comment:
comment = CellComment(
force_str(_("Read only")), "Author", height=20, width=80
)
cell.comment = comment
fields.append(cell)
else:
cell = WriteOnlyCell(ws, value=capfirst(_("data field")))
cell.style = "readlonlyheaderstyle"
fields.append(cell)
for b in request.report_bucketlist:
cell = WriteOnlyCell(ws, value=str(b["name"]))
cell.style = "readlonlyheaderstyle"
fields.append(cell)
if len(scenario_list) > 1:
cell = WriteOnlyCell(ws, value=capfirst(_("scenario")))
cell.style = "readlonlyheaderstyle"
fields.insert(0, cell)
ws.append(fields)
# Add an auto-filter to the table
ws.auto_filter.ref = "A1:%s1048576" % get_column_letter(len(fields))
# Write the report content
original_database = request.database
try:
for scenario in scenario_list:
request.database = scenario
query = cls.data_query(request, *args, fields=fields, **kwargs)
if listformat:
for row in query:
# Append a row
if hasattr(row, "__getitem__"):
fields = [
_getCellValue(row[f.name], field=f, request=request)
for f in myrows
if f.name
]
fields.extend([_getCellValue(row["bucket"])])
fields.extend([_getCellValue(row[f[0]]) for f in mycrosses])
else:
fields = [
_getCellValue(
getattr(row, f.name), field=f, request=request
)
for f in myrows
if f.name
]
fields.extend([_getCellValue(getattr(row, "bucket"))])
fields.extend(
[_getCellValue(getattr(row, f[0])) for f in mycrosses]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
else:
currentkey = None
row_of_buckets = None
for row in query:
# We use the first field in the output to recognize new rows.
if not currentkey:
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
elif currentkey == row[request.rows[0].name]:
row_of_buckets.append(row)
else:
# Write a row
for cross in mycrosses:
if not cross[1].get("visible", True):
continue
fields = [
_getCellValue(
row_of_buckets[0][s.name],
field=s,
request=request,
)
for s in myrows
if s.name
]
fields.extend(
[
_getCellValue(
(
capfirst(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
)
if "title" in cross[1]
else capfirst(cross[0])
)
]
)
fields.extend(
[
_getCellValue(bucket[cross[0]])
for bucket in row_of_buckets
]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
currentkey = row[request.rows[0].name]
row_of_buckets = [row]
# Write the last row
if row_of_buckets:
for cross in mycrosses:
if cross[1].get("visible", False):
continue
fields = [
_getCellValue(
row_of_buckets[0][s.name], field=s, request=request
)
for s in myrows
if s.name
]
fields.extend(
[
_getCellValue(
(
capfirst(
cross[1]["title"](request)
if callable(cross[1]["title"])
else cross[1]["title"]
)
)
if "title" in cross[1]
else capfirst(cross[0])
)
]
)
fields.extend(
[
_getCellValue(bucket[cross[0]])
for bucket in row_of_buckets
]
)
if len(scenario_list) > 1:
fields.insert(0, scenario)
ws.append(fields)
finally:
request.database = original_database
# Write the spreadsheet
wb.save(output)
numericTypes = (Decimal, float, int)
def _buildMaskedNames(model, exportConfig):
"""
Build a map with anonymous names for a model, and store it in the exportConfiguration.
"""
modelname = model._meta.model_name
if modelname in exportConfig:
return
exportConfig[modelname] = {}
if issubclass(model, HierarchyModel):
keys = (
model.objects.only("pk").order_by("lvl", "pk").values_list("pk", flat=True)
)
else:
keys = model.objects.only("pk").order_by("pk").values_list("pk", flat=True)
idx = 1
for key in keys:
exportConfig[modelname][key] = "%s %07d" % (modelname, idx)
idx += 1
def _parseSeconds(data):
"""
    Formats a timedelta into the format HH:MM:SS.XXXX
"""
total_seconds = data.total_seconds()
hours = math.floor(total_seconds / 3600)
minutes = math.floor((total_seconds - hours * 3600) / 60)
seconds = math.floor(total_seconds - hours * 3600 - minutes * 60)
remainder = total_seconds - 3600 * hours - 60 * minutes - seconds
return "%02d:%02d:%02d%s" % (
hours,
minutes,
seconds,
(".%s" % str(round(remainder, 8))[2:]) if remainder > 0 else "",
)
def _getCellValue(data, field=None, exportConfig=None, request=None):
if data is None:
return ""
elif isinstance(data, datetime):
if (
field
and request
and isinstance(field, (GridFieldLastModified, GridFieldLocalDateTime))
):
if not hasattr(request, "tzoffset"):
request.tzoffset = GridReport.getTimezoneOffset(request)
return data + request.tzoffset
else:
return data
elif isinstance(data, numericTypes) or isinstance(data, date):
return data
elif isinstance(data, timedelta):
return _parseSeconds(data)
elif isinstance(data, time):
return data.isoformat()
elif not exportConfig or not exportConfig.get("anonymous", False):
return str(data)
else:
if field.primary_key and not isinstance(field, AutoField):
model = field.model
elif isinstance(field, RelatedField):
model = field.related_model
else:
return str(data)
if model._meta.app_label == "common":
return str(data)
modelname = model._meta.model_name
if modelname not in exportConfig:
_buildMaskedNames(model, exportConfig)
# Return the mapped value
return exportConfig[modelname].get(data, "unknown")
|
agpl-3.0
| -1,733,423,649,352,162,600
| 39.234855
| 148
| 0.454501
| false
| 4.834588
| false
| false
| false
|
ph1l/halo_radio
|
HaloRadio/UserSongStatsListMaker.py
|
1
|
3143
|
#
#
# Copyright (C) 2004 Philip J Freeman
#
# This file is part of halo_radio
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import HaloRadio.TopListMaker as TopListMaker
import HaloRadio.Exception as HaloException
import re
class UserSongStatsListMaker(TopListMaker.TopListMaker):
"""
- UserSongStatsListMaker -
"""
def __init__( self ):
self.list = [ ]
self.tablename = 'user_song_stats'
return
def GetRandomSong ( self, userid ):
self.list = [ ]
query = """SELECT songid FROM %s WHERE userid='%d' and requests > kills ORDER BY rand() DESC LIMIT 1""" % ( self.tablename, userid )
result = self.do_my_query( query )
for row in result:
(id, ) = row
self.list.append(id)
return
def GetBySong ( self, songid ):
self.list = [ ]
query = """SELECT id FROM %s WHERE songid="%s";"""%(self.tablename, songid )
result = self.do_my_query( query )
for row in result:
(id, ) = row
self.list.append(id)
return
def GetTopRank ( self, userid, num ):
self.list = [ ]
query = """SELECT songid, requests, kills FROM %s WHERE userid='%d' AND requests> kills > 0 ORDER BY requests DESC LIMIT %d""" % ( self.tablename, userid, num )
result = self.do_my_query( query )
for row in result:
(id, requests, kills) = row
self.list.append((id, requests, kills))
return
def GetBottomRank ( self, userid, num ):
self.list = [ ]
query = """SELECT songid, kills, requests FROM %s WHERE userid=%d ORDER BY kills - requests DESC LIMIT %d""" % ( self.tablename, userid, num )
result = self.do_my_query( query )
for row in result:
(id, kills, requests) = row
self.list.append((id, kills, requests))
return
def GetRandomSongForUsers ( self, useridlist ):
import HaloRadio.Song as Song
wheres = []
for userid in useridlist:
wheres.append(" userid = \"%s\" "%userid)
query = """SELECT SUM(requests) as requests, SUM(kills) as kills, (rand()*100) as rank, songid FROM %s WHERE %s GROUP BY songid HAVING requests > kills ORDER BY rank DESC LIMIT 1;""" % (self.tablename, " OR ".join(wheres) )
try:
((requests, kills, rank, songid),) = self.do_my_query( query )
except ValueError:
raise HaloException.SongNotExistant
try:
song = Song.Song(songid)
except HaloException.SongNotFound, snf:
song = self.GetRandomSongForUsers(useridlist)
return song
|
gpl-2.0
| 6,443,673,913,120,863,000
| 35.126437
| 239
| 0.646834
| false
| 3.408894
| false
| false
| false
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/invalid_path/snapshot/paths/invalid_path4.py
|
1
|
1360
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, faild_point=13, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.create_vm, 'vm2', ],
[TestAction.detach_volume, 'volume1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm2']
Stopped:['vm1']
Enabled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']
attached:['volume2', 'volume3']
Detached:['volume1']
Deleted:[]
Expunged:[]
Ha:[]
Group:
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1@volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1', 'volume3-backup1']---vm1@volume1_volume2_volume3
'''
|
apache-2.0
| 2,703,161,563,352,621,600
| 34.815789
| 161
| 0.7
| false
| 2.661448
| true
| false
| false
|
liqd/adhocracy3.mercator
|
src/adhocracy_meinberlin/adhocracy_meinberlin/sheets/test_kiezkassen.py
|
2
|
1796
|
import colander
from pyramid import testing
from pytest import mark
from pytest import fixture
from pytest import raises
@mark.usefixtures('integration')
def test_includeme_register_proposal_sheet(registry):
from .kiezkassen import IProposal
context = testing.DummyResource(__provides__=IProposal)
assert registry.content.get_sheet(context, IProposal)
class TestProposalSheet:
@fixture
def meta(self):
from .kiezkassen import proposal_meta
return proposal_meta
@fixture
def context(self):
from adhocracy_core.interfaces import IItem
return testing.DummyResource(__provides__=IItem)
def test_create_valid(self, meta, context):
from zope.interface.verify import verifyObject
from adhocracy_core.interfaces import IResourceSheet
from .kiezkassen import IProposal
from .kiezkassen import ProposalSchema
inst = meta.sheet_class(meta, context, None)
assert IResourceSheet.providedBy(inst)
assert verifyObject(IResourceSheet, inst)
assert inst.meta.isheet == IProposal
assert inst.meta.schema_class == ProposalSchema
def test_get_empty(self, meta, context):
from decimal import Decimal
inst = meta.sheet_class(meta, context, None)
wanted = {'budget': Decimal(0),
'creator_participate': False,
'location_text': '',
}
assert inst.get() == wanted
class TestProposalSchema:
@fixture
def inst(self):
from .kiezkassen import ProposalSchema
return ProposalSchema()
def test_create(self, inst):
assert inst['budget'].validator.max == 50000
assert inst['budget'].required
assert inst['location_text'].validator.max == 100
|
agpl-3.0
| 4,342,760,450,687,123,000
| 29.965517
| 60
| 0.671492
| false
| 4.147806
| true
| false
| false
|
philscher/gkc
|
Benchmarks/IntegralCode/SolveOK.py
|
1
|
8023
|
from pylab import *
import scipy
#import math
import mpmath as mp
import traceback
import random
import numpy
import Dispersion_ConstTheta
#import fpectl
#fpectl.turnon_sigfpe()
import scipy.linalg as la
import scipy.sparse.linalg as sla
import SlepcDet
import gkcStyle
import iCode
class Species:
def __init__(self, m=0., q=1., T=1., n=0., eta=0., name="Unamed"):
self.m = m
self.q = q
self.T = T
self.n = n
self.eta = eta
self.name = name
############################## Settings for Integral Mode ######################################
Nx = 65
# Gao case Ln, Ls, Lx, Ly, theta, lambda_D2 = 1., 40., 12., 32., 0.1, 1.
# My setup
species = [ Species(m=0.,q=-1.,T=1.,n=1., eta=0.,name= "Adiab"), Species(1.,1.,1.,1., 5., "Ion") ]#, Species(1./1836.,-1.,1.,1., 4., "Electron")]
#species = [ Species(name= "Adiab"), Species(m=1.,q=1.,T=1.,n=1.,eta=5., name="Ion"), Species(m=0.0025,q=-1.,T=1.,n=1., eta=0., name="Electron") ]
Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 1./0.2, 64., 64., 0., [0.5]
## Gao Setup
species = [ Species(name= "Adiab"), Species(m=1836.,q=1.,T=1.,n=1.,eta=0., name="Ion"), Species(m=1.,q=-1.,T=1.,n=1., eta=3., name="Electron") ]
Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 0.025, 60., 64., 0., 2.*pi/64. * arange(1, 8)
#Ln, Ls, Lx, Ly, lambda_D2, ky_list = 1., 0.025, 60., 64., 0., [0.3]
######################## Setup Grid ######################
kx_list = 2*pi/Lx * linspace(-Nx/2., Nx/2., Nx)
X = linspace(-Lx/2, Lx/2, Nx)
dx, dk = Lx/Nx, 2.*pi/Lx
dx, dk = dx * dk *dk , 1.
fig = figure(figsize=(30,10))
global w_min, D_min
w_min = 0.+0.j
D_min = 1e99 + 1.j*1.e99
sol = []
def solveDispersion(ky):
A = zeros((Nx,Nx), dtype=complex)
def setupA(w):
A[:,:] = 0.
iCode.setupMatrixPy(species, w, ky, X, kx_list, Ls, Ln, Nx, A, dk*dx, lambda_D2)
return A
def solveEquation(w):
global D_min, w_min
A = setupA(complex(w))
#print A
#val = SlepcDet.getMinAbsEigenvalue(A)
val = SlepcDet.getMinAbsEigenvaluLA(A)
#val = det(A)
#(sign, logdet) = np.linalg.slogdet(A)
#val = sign * logdet
        if abs(val) < abs(D_min) : D_min, w_min = val, complex(w)
print ky, " w : %.3f+%.3f j" % (real(complex(w)), imag(complex(w))) , " Determinant : %.2e " % abs(val)
if val != val: return 0. + 0.j
return val
try :
w0= -0.01 + 0.02j
w_damp = complex(mp.findroot(solveEquation, (w0, w0-0.005j, w0+0.005), solver='muller', tol=1.e-8, ftol=1.e-8, maxsteps=5000))
#w_damp = PyPPL.getZero(solveEquation, init=(w0, w0+0.01j, w0+0.02), solver='muller', tol=1.e-9, ftol=1.e-6, maxsteps=5000)
except:
traceback.print_exc(file=sys.stdout)
try:
#for n in range(Nx):
n = 0
global w_min
print "-----------------> ", w_min
# solution found for w0, get solution vector
werr = solveEquation(w_min)
A = setupA(w_min)
#print A
#S = solve(A, append(1.+0.j,zeros(Nx-1, dtype=complex)))
#S = solve(A, append(1.+0.j, append(zeros(Nx-2, dtype=complex), 1.+0.j)))
#b = append(0., append(1.+0., zeros(Nx-2, dtype=complex)))
#b = zeros(Nx, dtype=complex)
#b = ones(Nx, dtype=complex)
#b[:] = 0. ;
#b[0] = 1.
#S, err = solve(A, b), 0.
#S, err = sla.lgmres(A,b, tol=1.e-9)
# We found our eigenvalue w_min, now we use the
# inverse iteration to find the closest eigenvector
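    # (Inverse iteration: repeatedly solving (A - w_min*I) b = b_prev drives b towards
    #  the eigenvector whose eigenvalue lies closest to w_min.)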
I = (1.+0.j) * eye(Nx)
b = (1.+1.j) * ones(Nx, dtype=complex)
for n in range(4096):
b_prev = b
b = solve(A - w_min * I, b)
# RESCALE
b = b / sum(abs(b))
if (abs(sum( sqrt(sum(b**2)/sum(b_prev**2)) * b_prev - b )) < 1.e-10) : break
#print("Eigv Error : %.2e Abs : %.2e " % (abs(sum( sqrt(sum(b**2)/sum(b_prev**2)) * b_prev - b )), sum(abs(b))) )
#print "Sol : " , b
clf()
gkcStyle.newFigure(ratio='2.33:1', basesize=12)
subplot(131)
fig.suptitle("$\omega_0$ = %.4f %.4fi $\pm$ %.2e %.2e i" % (real(w_min), imag(w_min), real(werr), imag(werr)))
###################### Plot Fourier Modes ##########3
b = -real(b) + 1.j * imag(b)
plot(kx_list, real(b), 'r.-', label="real")
plot(kx_list, imag(b), '.-', label="imag", color=gkcStyle.color_indigo)
xlim((min(kx_list), max(kx_list)))
xlabel("$k_x$")
ylabel("$\phi(k_x)$")
legend(ncol=2).draw_frame(0)
################### Plot real modes ########3
subplot(132)
# Remove High frequency modes
#b[:3] = 0.;
#b[-4:] = 0.;
# We have to transform to FFTW format
F = append(append(b[Nx/2], b[Nx/2+1:]), b[:Nx/2])
print "F--------------->", F
plot(X,real(np.fft.ifft(F)), 'r.-', label='real')
plot(X,imag(np.fft.ifft(F)), '.-', label='imag', color=gkcStyle.color_indigo)
xlim((min(X), max(X)))
xlabel("$x$")
ylabel("$\phi(x)$")
legend(ncol=2).draw_frame(0)
################ Plot Contour
subplot(133)
y = linspace(0., Ly, 128)
KxKy = zeros((Nx, 65), dtype=complex)
nky = ky * Ly / (2.*pi)
KxKy[:,nky] = np.fft.ifft(F)
XY = np.fft.irfft(KxKy, axis=1)
xlabel("$x$")
ylabel("$y$")
contourf(X, y, XY.T, 20, vmin=-abs(XY).max(), vmax=abs(XY).max())
colorbar()
savefig("Plot2_" + str(ky) + ".png", bbox_inches='tight')
# append and normalize
sol.append(np.fft.ifft(b/abs(b).max()))
except:
#species = [ Species(name= "Adiab"), Species(m=1.,q=1.,T=1.,n=1.,eta=5., name="Ion"), Species(m=0.0025,q=-1.,T=1.,n=1., eta=0., name="Electron") ]
traceback.print_exc(file=sys.stdout)
return w_min, abs(solveEquation(w_min))
w_list1 = []
def plotMode():
for ky in ky_list:
wn, err = solveDispersion(ky)
w_list1.append (wn)
def plotContours():
ky = 0.5
R = linspace(-1.5, 0.5, 16)
I = linspace(0., 10., 16)
V = zeros((len(R),len(I)), dtype=complex)
for r in range(len(R)):
for i in range(len(I)):
A = zeros((Nx,Nx), dtype=complex)
iCode.setupMatrixPy(species, R[r]+1.j*I[i], ky, X, kx_list, Ls, Ln, Nx, A, dk*dx, lambda_D2)
val = det(A)
#(sign, logdet) = np.linalg.slogdet(A)
#val = sign * logdet
V[r,i] = val
print "x, y", R[r], I[i] , " r : ", val
subplot(131)
contourf(R,I,real(V), 100)
colorbar()
subplot(132)
contourf(R,I,imag(V), 100)
colorbar()
subplot(133)
contourf(R,I,abs(V), 100)
colorbar()
#pcolor(R,I,imag(V))
savefig("Contour.png")
#print "(Integral) Solution is w : ",w0
#print "(local) Solution is w : ",w_Local
#plotContours()
plotMode()
################################## Plot Figures ############################
### Plot
clf()
ky_list = array(ky_list)
plot(ky_list, real(w_list1), 'o-', label='real')
plot(ky_list, imag(w_list1), 'o-', label='imag')
legend(ncol=2, loc='best').draw_frame(0)
xlim((min(ky_list), max(ky_list)))
savefig("Results.png")
"""
# Make 3D Plot kx, ky, z
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
clf()
Z = array(sol)
ax = fig.add_subplot(121, projection='3d')
_X,_ky = np.meshgrid(X,ky_list)
ax.plot_surface(_X, _ky, real(Z), rstride=1, cstride=1, cmap=cm.jet)
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(_X, _ky, imag(Z), rstride=1, cstride=1, cmap=cm.jet)
#ax.set_zlim3d(0, 1)
ax.set_xlabel(r'$\phi_\mathrm{real}$')
ax.set_ylabel(r'$\phi_\mathrm{im}$')
ax.w_yaxis.set_scale("log")
savefig("Results_3D.png")
"""
|
gpl-3.0
| 5,590,745,320,015,303,000
| 27.756272
| 150
| 0.510408
| false
| 2.578914
| false
| false
| false
|
TomAugspurger/DSADD
|
setup.py
|
1
|
1281
|
from setuptools import setup, find_packages
# To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='dsadd',
version='0.0.2',
description='A python package for defensive data analysis.',
long_description='A python package for defensive data analysis.',
url='https://github.com/tomaugspurger/dsadd',
# Author details
author='Tom Augspurger',
author_email='tom.w.augspurger@gmail.com',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='data analysis',
packages=find_packages(exclude=['tests']),
# install_requires=['numpy', 'pandas'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [''],
'test': ['coverage', 'pytest'],
},
)
|
mit
| -496,449,566,151,237,250
| 26.255319
| 70
| 0.622951
| false
| 4.092652
| false
| false
| false
|
schuhumi/timetravel
|
timetravel-gui.py
|
1
|
3226
|
#!/usr/bin/env python3
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from gi.repository import Gtk, Gio
import cairo
import math
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Timetravel")
#self.set_border_width(10)
self.set_default_size(800, 600)
hb = Gtk.HeaderBar()
hb.set_show_close_button(True)
hb.props.title = "Timetravel"
self.set_titlebar(hb)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="emblem-system-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
hb.pack_end(button)
box = Gtk.Box(spacing=6)
button = Gtk.Button(label="Snapshot")
box.add(button)
button = Gtk.Button()
icon = Gio.ThemedIcon(name="zoom-in-symbolic")
image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
button.add(image)
box.add(button)
hb.pack_start(box)
self.darea = Gtk.DrawingArea()
self.darea.connect("draw", self.on_draw)
#self.darea.set_events(Gdk.EventMask.BUTTON_PRESS_MASK)
self.add(self.darea)
def on_draw(self, wid, cr):
cr.translate(700,250)
#cr.scale(800,600)
cr.set_source_rgb(0.6, 0.6, 0.6)
cr.set_line_width(1)
cr.set_dash([10.0, 6.0])
cr.move_to(0, -250)
cr.line_to(0, 210)
cr.stroke()
cr.set_dash([])
nowTxt = "now"
cr.set_source_rgb(0,0,0)
cr.set_font_size(15)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(nowTxt)
cr.move_to(-width/2, 210+height+10)
cr.show_text(nowTxt)
cr.stroke()
cr.set_source_rgb(0.2, 0.2, 0.7)
cr.set_line_width(3)
cr.move_to(-200, 0)
cr.line_to(0, 0)
cr.stroke()
drawSnapshot(cr, "current", 0, 0, 0.2, 0.7, 0.2)
drawSnapshot(cr, "snap-2015-07-16", -200, 0, 0.2, 0.2, 0.7)
def drawSnapshot (cr, name, x, y, r, g, b):
cr.set_source_rgb(r,g,b)
cr.arc(x, y, 8, 0, 2*math.pi)
cr.fill()
cr.set_source_rgb(0,0,0)
cr.set_font_size(15)
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(name)
cr.move_to(x-width/2, y+height+10)
cr.show_text(name)
cr.stroke()
def drawNowAxis (cr):
pass
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
gpl-3.0
| 7,090,185,540,407,805,000
| 28.87037
| 87
| 0.599814
| false
| 3.054924
| false
| false
| false
|
Adista-ste/metabaseparser
|
script2012r2.py
|
1
|
1237
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lxml import etree
import argparse, sys, os, re
arguments = argparse.ArgumentParser()
arguments.add_argument("-s","--https",help="Traite également les domaines HTTPS", action='store_true')
arguments.add_argument("-f","--fichier",help="Définit le fichierxml utilise")
args = arguments.parse_args()
if not args.fichier:
print "Erreur : Pas de fichier de MetaBase indiqué"
arguments.print_help()
sys.exit(1)
elif not os.path.exists(args.fichier):
print "Erreur : Le fichier MetaBase indiqué n'existe pas"
arguments.print_help()
sys.exit(2)
tree = etree.parse(args.fichier)
#ns={'xmlns': 'urn:microsoft-catalog:XML_Metabase_V64_0'}
liste=[]
#for i in tree.iter(tag="{%s}IIsWebServer" % ns['xmlns']):
for sites in tree.iter(tag="site"):
for binding in sites.iter('binding'):
bind = binding.attrib.get('bindingInformation')
ndd = re.sub(r'\**:[0-9]+:', r' ',bind)
if ndd:
#print ndd
liste+=ndd.split()
#print bind['bindingInformation']
# if site:
# if args.https:
# inter=re.sub(r':443:', r' ', site)
# inter=re.sub(r':80:', r' ', site)
# liste+=inter.split()
#
liste.sort()
final=list(set(liste))
final.sort()
#
for j in final:
print "%s" % j
|
agpl-3.0
| -6,761,721,896,002,896,000
| 23.176471
| 102
| 0.6691
| false
| 2.595789
| false
| false
| false
|
guyrt/court-reminder
|
server/admin_app.py
|
1
|
1277
|
"""
Flask Admin App
"""
import os
from flask import Flask, flash, render_template, request
from flask_admin import Admin
from flask_basicauth import BasicAuth
from azure.storage.blob import BlockBlobService, ContentSettings
from storage.models import Database
from storage.secrets import blob_key, blob_accountname, blob_container
from server.views import AinView
from server import config
db = Database()
blob_service = BlockBlobService(account_name=blob_accountname, account_key=blob_key)
app = Flask(__name__)
app.config.from_object(config.Config)
basic_auth = BasicAuth(app)
admin = Admin(app, name='ASAP', template_mode='bootstrap3')
admin.add_view(AinView(None, name='Ain', endpoint='ain'))
@app.route('/audio')
def audio():
ain = request.args.get('id')
azure_path = db.get_ain(ain).get('CallUploadUrl')
if azure_path:
file_root = os.path.join(app.root_path, 'static')
if not os.path.exists(file_root):
os.makedirs(file_root)
filename = ain + '.wav'
path = os.path.join(file_root, filename)
blob_service.get_blob_to_path(container_name=blob_container,
blob_name=azure_path, file_path=path)
else:
filename = None
return render_template('audio.html', filename=filename)
|
mit
| -6,595,723,578,014,805,000
| 28.697674
| 84
| 0.703994
| false
| 3.405333
| false
| false
| false
|
jantaylor/road-home-time-tracker
|
timetracker/models.py
|
1
|
3525
|
from django.db import models
from django.contrib import admin
from django.db.models import signals
class Volunteer(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.EmailField(max_length=254, unique=True)
is_group = models.BooleanField(default=False, verbose_name="Group")
organization = models.CharField(null=True, max_length=50, blank=True)
group_count = models.IntegerField(null=True, blank=True)
times_clocked_out = models.IntegerField(default=0, editable=False)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.last_name) + ", {}".format(self.first_name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Site(models.Model):
name = models.CharField(max_length=50, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
class Activity(models.Model):
class Meta:
verbose_name_plural = "activities"
name = models.CharField(max_length=100, unique=True)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
def __str__(self):
return "{}".format(self.name)
def delete(self, *args, **kwargs):
self.active = False
self.save()
def print_out(sender, instance, created, **kwargs):
if instance.end is not None:
volunteer = instance.volunteer
if volunteer.is_group:
hours = ((instance.end - instance.start).total_seconds() / 3600) * volunteer.group_count
else:
hours = (instance.end - instance.start).total_seconds() / 3600
if instance.hours != hours:
instance.hours = hours
instance.save()
volunteer = instance.volunteer
volunteer.times_clocked_out += 1
volunteer.save()
class TimeEntry(models.Model):
class Meta:
verbose_name_plural = "Time Entries"
volunteer = models.ForeignKey(Volunteer, on_delete=models.CASCADE)
start = models.DateTimeField(verbose_name="Start Time")
end = models.DateTimeField(null=True, blank=True, verbose_name="End Time")
site = models.ForeignKey(Site, on_delete=models.CASCADE)
activity = models.ForeignKey(Activity, on_delete=models.CASCADE)
date_added = models.DateTimeField(auto_now_add=True, verbose_name="Date Added")
date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
active = models.BooleanField(default=True)
hours = models.DecimalField(null=True, blank=True, decimal_places=2, max_digits=12)
def __str__(self):
return "{}".format(self.id) + " {}".format(self.volunteer) + " {}".format(self.start.strftime('%m/%d/%Y'))
def delete(self, *args, **kwargs):
self.active = False
self.save()
signals.post_save.connect(print_out, sender=TimeEntry)
|
mit
| -9,162,268,426,743,387,000
| 36.5
| 114
| 0.670355
| false
| 3.726216
| false
| false
| false
|
SeungGiJeong/SK_FastIR
|
dump/mbr.py
|
1
|
6740
|
from construct import *
from distorm3 import Decode, Decode16Bits
import hexdump
import logging
import os
class Mbr:
def __init__(self, path):
self.mbrHexa = ""
self.mbrStruct = ""
self.bootloaderCode = ""
self.offset = 0
self.partition = {"name": []}
self.signature = ""
self.path = path
self.mbr = Struct("mbr",
HexDumpAdapter(Bytes("bootloaderCode", 446)),
Array(4,
Struct("partitions",
Enum(Byte("state"),
INACTIVE=0x00,
ACTIVE=0x80,
),
BitStruct("beginning",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
Enum(UBInt8("type"),
Nothing=0x00,
FAT12_CHS=0x01,
XENIX_ROOT=0x02,
XENIX_USR=0x03,
FAT16_16_32MB_CHS=0x04,
Extended_DOS=0x05,
FAT16_32MB_CHS=0x06,
NTFS=0x07,
FAT32_CHS=0x0b,
FAT32_LBA=0x0c,
FAT16_32MB_2GB_LBA=0x0e,
Microsoft_Extended_LBA=0x0f,
Hidden_FAT12_CHS=0x11,
Hidden_FAT16_16_32MB_CHS=0x14,
Hidden_FAT16_32MB_2GB_CHS=0x16,
AST_SmartSleep_Partition=0x18,
Hidden_FAT32_CHS=0x1b,
Hidden_FAT32_LBA=0x1c,
Hidden_FAT16_32MB_2GB_LBA=0x1e,
PQservice=0x27,
Plan_9_partition=0x39,
PartitionMagic_recovery_partition=0x3c,
Microsoft_MBR_Dynamic_Disk=0x42,
GoBack_partition=0x44,
Novell=0x51,
CP_M=0x52,
Unix_System_V=0x63,
PC_ARMOUR_protected_partition=0x64,
Solaris_x86_or_Linux_Swap=0x82,
LINUX_NATIVE=0x83,
Hibernation=0x84,
Linux_Extended=0x85,
NTFS_Volume_Set=0x86,
BSD_OS=0x9f,
FreeBSD=0xa5,
OpenBSD=0xa6,
Mac_OSX=0xa8,
NetBSD=0xa9,
Mac_OSX_Boot=0xab,
MacOS_X_HFS=0xaf,
BSDI=0xb7,
BSDI_Swap=0xb8,
Boot_Wizard_hidden=0xbb,
Solaris_8_boot_partition=0xbe,
CP_M_86=0xd8,
Dell_PowerEdge_Server_utilities_FAT_FS=0xde,
DG_UX_virtual_disk_manager_partition=0xdf,
BeOS_BFS=0xeb,
EFI_GPT_Disk=0xee,
EFI_System_Partition=0xef,
VMWare_File_System=0xfb,
VMWare_Swap=0xfc,
_default_=Pass,
),
BitStruct("ending",
Octet("head"),
Bits("sect", 6),
Bits("cyl", 10),
),
ULInt32("sector_offset"), # offset from MBR in sectors
ULInt32("size"), # in sectors
)
),
Const(Bytes("signature", 2), "\x55\xAA"),
)
def save_mbr(self, image):
file_image = open(image, "rb")
file_mbr = open(self.path + os.path.sep + "mbr_raw", "wb")
try:
file_mbr.write(file_image.read(512))
except Exception as err:
self.logger.error("Error to extract MBR")
file_image.close()
file_mbr.close()
return file_mbr.name
def extract_hexa(self, file_mbr):
# file = open(fileMbr,"rb")
hex_str = ""
for line in file_mbr.split('\n'):
hex_str += line[10:58]
hex_str = hex_str.replace(' ', '')
self.mbrHexa = hex_str
def mbr_parsing(self, image):
file_mbr = self.save_mbr(image)
self.extract_hexa(hexdump.hexdump(open(file_mbr, 'rb').read(512), "return"))
try:
cap1 = self.mbrHexa.decode("hex")
self.mbrStruct = self.mbr.parse(cap1)
return self.mbrStruct
except Exception as inst:
self.logger.error("Error MBR Parsing")
def boot_loader_disassembly(self):
l = Decode(0x000, self.mbrStruct.bootloaderCode, Decode16Bits)
assembly_code = ""
for (offset, size, instruction, hexdump) in l:
assembly_code = assembly_code + "%.8x: %-32s %s" % (offset, hexdump, instruction) + "\n"
h_file = open(self.path + os.path.sep + "bootLoaderAssemblyCode.txt", "w")
h_file.write(assembly_code)
h_file.close()
|
gpl-3.0
| 5,784,401,525,415,355,000
| 49.676692
| 100
| 0.327448
| false
| 4.970501
| false
| false
| false
|
caderache2014/django-rest-tutorial
|
tutorial/tutorial/settings.py
|
1
|
2075
|
"""
Django settings for tutorial project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xnb^kzgkv4vd!(u@ry_=eo2xo_)@_c12bsvk63hv=c2%%4!zf#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'rest_framework',
'snippets',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tutorial.urls'
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tmp.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
mit
| -1,510,887,145,338,488,600
| 22.590909
| 71
| 0.697831
| false
| 3.330658
| false
| false
| false
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/optimize/tests/test_regression.py
|
1
|
1237
|
"""Regression tests for optimize.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_almost_equal, \
assert_raises
import scipy.optimize
class TestRegression(TestCase):
def test_newton_x0_is_0(self):
"""Ticket #1074"""
tgt = 1
res = scipy.optimize.newton(lambda x: x - 1, 0)
assert_almost_equal(res, tgt)
def test_newton_integers(self):
"""Ticket #1214"""
root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
fprime=lambda x: 2*x)
assert_almost_equal(root, 1.0)
def test_lmdif_errmsg(self):
# this shouldn't cause a crash on Python 3
class SomeError(Exception):
pass
counter = [0]
def func(x):
counter[0] += 1
if counter[0] < 3:
return x**2 - np.array([9, 10, 11])
else:
raise SomeError()
assert_raises(SomeError,
scipy.optimize.leastsq,
func, [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
|
gpl-3.0
| 797,545,178,663,493,000
| 25.488889
| 76
| 0.515764
| false
| 3.817901
| true
| false
| false
|
chrisrink10/mumpy
|
mumpy/interpreter.py
|
1
|
5105
|
"""MUMPy Interpreter
The functions in this module represent various functions that may need
to be carried out from the command line (including starting the REPL
and compiling and executing a routine file).
Licensed under a BSD license. See LICENSE for more information.
Author: Christopher Rink"""
try:
# Used by Python's input() to provide readline functionality
# Does not work on Windows, so we'll just pass
import readline
except ImportError:
pass
import argparse
import mumpy
def main():
"""The main command line entry point for MUMPy."""
parser = argparse.ArgumentParser(
description="MUMPS interpreter. "
"Summoning this script without any arguments will open the "
"included MUMPS REPL capability."
)
parser.add_argument("-d", "--debug",
help="Enable debug output in REPL mode",
required=False,
action='store_true'
)
parser.add_argument("-c", "--compile",
help="A list of MUMPS scripts to compile.",
required=False,
nargs='*'
)
parser.add_argument("-f", "--file",
help="A MUMPS routine to execute.",
required=False,
nargs=1
)
parser.add_argument("-t", "--tag",
help="The tag to execute in the specified routine",
required=False,
nargs=1
)
parser.add_argument("-dev", "--device",
help="The I/O device this process should start with",
required=False,
nargs=1
)
parser.add_argument("-a", "--args",
help="The arguments to pass to the specified tag",
required=False,
nargs="*"
)
parser.add_argument("-r", "--recompile",
help="Recompile any routines before interpreting.",
required=False,
action='store_true'
)
args = parser.parse_args()
# Process routine compilations first
if args.compile:
compile_routine(args.compile,
args.debug)
# Then interpret any files
if args.file:
interpret(args.file[0],
tag=None if args.tag is None else args.tag[0],
device=None if args.device is None else args.device[0],
args=args.args,
recompile=args.recompile,
debug=args.debug)
    # If the user wants neither to compile any routines nor to interpret any files,
# start the REPL
if not args.compile and not args.file:
start_repl(args.debug)
def start_repl(debug=False):
"""Start the interpreter loop."""
env = mumpy.MUMPSEnvironment()
p = mumpy.MUMPSParser(env, debug=debug)
# Catch the Keyboard Interrupt to let us exit gracefully
try:
# Accept user input
while True:
current_line = input("mumpy > ")
# Allow empty lines from the REPL
if current_line.strip() == "":
continue
# Catch any Syntax errors from the user input
try:
p.parse_repl(current_line)
except mumpy.MUMPSSyntaxError as e:
print(e)
# If output was emitted, we need to add an extra newline
if p.output:
print("")
except KeyboardInterrupt:
print("")
pass
def compile_routine(files, debug=False):
"""Compile a list of routines."""
# Compile the routines to an intermediate format
intf = []
for file in files:
print("Compiling {file}...".format(file=file))
try:
intf.append(mumpy.MUMPSFile(rou=file, debug=debug, recompile=True))
print("Success!")
except mumpy.MUMPSCompileError as e:
print(e)
print("Failed to compile {rou}!".format(rou=file))
def interpret(file, tag=None, args=None, device=None,
recompile=False, debug=False):
"""Interpret a routine file.."""
# Prepare the file
try:
f = mumpy.MUMPSFile(file, recompile=recompile, debug=debug)
except mumpy.MUMPSCompileError as e:
print(e)
return
    # If we recompiled and we made it this far, then there were no errors
if recompile:
print("{} recompiled successfully!".format(file))
# Prepare the environment and parser
env = mumpy.MUMPSEnvironment()
p = mumpy.MUMPSParser(env, debug=debug)
# If the user specifies another default device, use that
if device is not None:
env.open(device)
env.use(device)
# Parse the file
try:
p.parse_file(f, tag=tag, args=args)
except mumpy.MUMPSSyntaxError as e:
print(e)
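# Usage sketches (the routine and tag names are invented for illustration;
# the signatures match the functions defined above):
#     start_repl(debug=True)
#     compile_routine(["FIB.m", "UTIL.m"])
#     interpret("FIB", tag="START", args=["10"], recompile=True)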
|
bsd-3-clause
| -7,555,515,970,046,993,000
| 32.149351
| 80
| 0.543389
| false
| 4.541815
| false
| false
| false
|
Kozea/pygal
|
pygal/graph/xy.py
|
1
|
4016
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
XY Line graph: Plot a set of couple data points (x, y) connected by
straight segments.
"""
from functools import reduce
from pygal.graph.dual import Dual
from pygal.graph.line import Line
from pygal.util import cached_property, compose, ident
class XY(Line, Dual):
"""XY Line graph class"""
_x_adapters = []
@cached_property
def xvals(self):
"""All x values"""
return [
val[0] for serie in self.all_series for val in serie.values
if val[0] is not None
]
@cached_property
def yvals(self):
"""All y values"""
return [
val[1] for serie in self.series for val in serie.values
if val[1] is not None
]
@cached_property
def _min(self):
"""Getter for the minimum series value"""
return (
self.range[0] if (self.range and self.range[0] is not None) else
(min(self.yvals) if self.yvals else None)
)
@cached_property
def _max(self):
"""Getter for the maximum series value"""
return (
self.range[1] if (self.range and self.range[1] is not None) else
(max(self.yvals) if self.yvals else None)
)
def _compute(self):
"""Compute x/y min and max and x/y scale and set labels"""
if self.xvals:
if self.xrange:
x_adapter = reduce(compose, self._x_adapters) if getattr(
self, '_x_adapters', None
) else ident
xmin = x_adapter(self.xrange[0])
xmax = x_adapter(self.xrange[1])
else:
xmin = min(self.xvals)
xmax = max(self.xvals)
xrng = (xmax - xmin)
else:
xrng = None
if self.yvals:
ymin = self._min
ymax = self._max
if self.include_x_axis:
ymin = min(ymin or 0, 0)
ymax = max(ymax or 0, 0)
yrng = (ymax - ymin)
else:
yrng = None
for serie in self.all_series:
serie.points = serie.values
if self.interpolate:
vals = list(
zip(
*sorted(
filter(lambda t: None not in t, serie.points),
key=lambda x: x[0]
)
)
)
serie.interpolated = self._interpolate(vals[0], vals[1])
if self.interpolate:
self.xvals = [
val[0] for serie in self.all_series
for val in serie.interpolated
]
self.yvals = [
val[1] for serie in self.series for val in serie.interpolated
]
if self.xvals:
xmin = min(self.xvals)
xmax = max(self.xvals)
xrng = (xmax - xmin)
else:
xrng = None
# these values can also be 0 (zero), so testing explicitly for None
if xrng is not None:
self._box.xmin, self._box.xmax = xmin, xmax
if yrng is not None:
self._box.ymin, self._box.ymax = ymin, ymax
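# Minimal usage sketch (values are arbitrary; pygal.XY resolves to the class
# defined above):
if __name__ == '__main__':
    import pygal
    xy_chart = pygal.XY()
    xy_chart.add('series', [(0, 1), (1, 3), (2, 2)])
    svg = xy_chart.render()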
|
lgpl-3.0
| -429,799,592,630,257,700
| 29.648855
| 79
| 0.537983
| false
| 4.039235
| false
| false
| false
|
matichorvat/pydmrs
|
dmrs_preprocess/label.py
|
1
|
4339
|
def create_label(dmrs_xml, carg_clean=False):
"""
Create an identifying label attribute for each node and link,
consisting of its arguments and properties.
:param dmrs_xml: Input DMRS XML
    :param carg_clean: If True, strip the surrounding quotes from CARG values
    :return: Modified DMRS XML
"""
for entity in dmrs_xml:
if entity.tag == 'node':
node_attribs = collect_node_attribs(entity)
# Remove quotes around CARG
if node_attribs.get('carg') is not None and carg_clean:
clean_carg = node_attribs['carg'][1:-1]
entity.attrib['carg'] = clean_carg
node_attribs['carg'] = clean_carg
if node_attribs.get('gpred') is not None:
label = label_gpred(node_attribs)
elif node_attribs.get('pos') == 'n':
label = label_noun(node_attribs)
elif node_attribs.get('pos') == 'v':
label = label_verb(node_attribs)
else:
label = label_default(node_attribs)
# Attach the label to node XML
entity.attrib['label'] = label
elif entity.tag == 'link':
# Get ARG and POST of a link
arg = entity.findall('rargname')[0].text if entity.findall('rargname') else None
post = entity.findall('post')[0].text if entity.findall('post') else None
# Create a label and attach it to the link XML
entity.attrib['label'] = '_'.join([x for x in [arg, post] if x is not None])
return dmrs_xml
noun_like_gpreds = {'person', 'manner', 'reason', 'place_n', 'time_n', 'minute', 'mofy',
'numbered_hour', 'dofm', 'dofw', 'holiday', 'season', 'year_range',
'yofc', 'thing', 'measure', 'meas_np', 'named', 'named_n'}
def label_gpred(node_attribs):
if node_attribs.get('gpred') == 'pron':
label_list = [
node_attribs.get('gpred'),
node_attribs.get('pers'),
node_attribs.get('num'),
node_attribs.get('gend')
]
elif node_attribs.get('gpred') in noun_like_gpreds:
label_list = [
node_attribs.get('carg'),
node_attribs.get('gpred'),
simplify_gpred_num(node_attribs.get('num'))
]
else:
label_list = [
node_attribs.get('carg'),
node_attribs.get('gpred')
]
return '_'.join([unicode(x) for x in label_list if x is not None])
def label_noun(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense'),
node_attribs.get('pers') if node_attribs.get('pers') is not None else '3',
node_attribs.get('num') if node_attribs.get('num') is not None else 'sg'
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def label_verb(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense'),
node_attribs.get('tense'),
node_attribs.get('sf'),
'perf' if node_attribs.get('perf') != '-' else None,
'prog' if node_attribs.get('prog') != '-' else None
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def label_default(node_attribs):
label_list = [
node_attribs.get('lemma'),
node_attribs.get('pos'),
node_attribs.get('sense')
]
return '_' + '_'.join([unicode(x) for x in label_list if x is not None])
def collect_node_attribs(node):
"""
Collect node attributes in a dictionary
:param node: XML node
:return: Dictionary of node attributes
"""
node_attribs = dict()
for node_info in node:
node_attribs.update(node_info.attrib)
if node_info.tag == 'gpred':
node_attribs[node_info.tag] = node_info.text
if node.attrib.get('carg') is not None:
node_attribs['carg'] = node.attrib['carg']
if node_attribs.get('tense') is not None and node_attribs.get('tense').lower() == 'untensed':
del node_attribs['tense']
if node_attribs.get('sf') == 'prop' or node_attribs.get('sf') == 'prop-or-ques':
del node_attribs['sf']
return node_attribs
def simplify_gpred_num(gpred_num):
return gpred_num if gpred_num == 'pl' else 'sg'
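# Usage sketch (the XML fragment is invented and far smaller than real DMRS
# output; create_label() annotates the tree in place and returns it):
#     from lxml import etree
#     dmrs = etree.fromstring(
#         '<dmrs><node nodeid="1">'
#         '<realpred lemma="dog" pos="n" sense="1"/>'
#         '<sortinfo num="sg" pers="3"/>'
#         '</node></dmrs>')
#     create_label(dmrs)
#     print dmrs[0].attrib['label']   # -> _dog_n_1_3_sg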
|
mit
| -1,478,493,958,019,896,300
| 29.342657
| 97
| 0.558424
| false
| 3.536267
| false
| false
| false
|
jonaustin/advisoryscan
|
django/django/middleware/cache.py
|
1
|
4057
|
from django.conf import settings
from django.core.cache import cache
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers
class CacheMiddleware(object):
"""
Cache middleware. If this is enabled, each Django-powered page will be
cached for CACHE_MIDDLEWARE_SECONDS seconds. Cache is based on URLs.
Only parameter-less GET or HEAD-requests with status code 200 are cached.
If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a
simple and effective way of avoiding the caching of the Django admin (and
any other user-specific content).
This middleware expects that a HEAD request is answered with a response
exactly like the corresponding GET request.
When a hit occurs, a shallow copy of the original response object is
returned from process_request.
Pages will be cached based on the contents of the request headers
listed in the response's "Vary" header. This means that pages shouldn't
change their "Vary" header.
This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):
self.cache_timeout = cache_timeout
if cache_timeout is None:
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = key_prefix
if key_prefix is None:
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_anonymous_only is None:
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
else:
self.cache_anonymous_only = cache_anonymous_only
def process_request(self, request):
"Checks whether the page is already cached and returns the cached version if available."
if self.cache_anonymous_only:
assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
if not request.method in ('GET', 'HEAD') or request.GET:
request._cache_update_cache = False
return None # Don't bother checking the cache.
if self.cache_anonymous_only and request.user.is_authenticated():
request._cache_update_cache = False
return None # Don't cache requests from authenticated users.
cache_key = get_cache_key(request, self.key_prefix)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
request._cache_update_cache = False
return response
def process_response(self, request, response):
"Sets the cache, if needed."
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
# We don't need to update the cache, just return.
return response
if request.method != 'GET':
# This is a stronger requirement than above. It is needed
# because of interactions between this middleware and the
# HTTPMiddleware, which throws the body of a HEAD-request
# away before this middleware gets a chance to cache it.
return response
if not response.status_code == 200:
return response
patch_response_headers(response, self.cache_timeout)
cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)
cache.set(cache_key, response, self.cache_timeout)
return response
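# Wiring sketch (these are the settings the middleware reads above; the values
# are illustrative):
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.cache.CacheMiddleware',
#         # ... other middleware ...
#     )
#     CACHE_MIDDLEWARE_SECONDS = 600
#     CACHE_MIDDLEWARE_KEY_PREFIX = 'mysite'
#     CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True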
|
mit
| 901,742,829,478,083,500
| 47.297619
| 301
| 0.685975
| false
| 4.438731
| false
| false
| false
|
ostrokach/biskit
|
scripts/Mod/align.py
|
1
|
4218
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
## last $Author$
## $Date$
## $Revision$
import Biskit.Mod.modUtils as modUtils
from Biskit.Mod import *
import Biskit.tools as tools
from Biskit import EHandler
from Biskit import LogFile
import sys, os.path
def _use( o ):
print """
Syntax: align.py [ -o |outFolder| -log |logFile| -h |host_computer| -nosap ]
Options:
-o output folder for results (default: .)
-log log file (default: STDOUT)
-nosap skip structural alignment (default: don't skip)
-h host computer for calculation (default: local computer)
-> must be accessible w/o password via ssh, check!
-? or help .. this help screen
Default options:
"""
for key, value in o.items():
print "\t-",key, "\t",value
sys.exit(0)
def defaultOptions():
return {'o':'.',
'log': None,
'h':None
}
### MAIN ###
options = tools.cmdDict( defaultOptions() )
outFolder = tools.absfile( options['o'] )
host = options['h']
sap = not 'nosap' in options
log = None
if options['log']:
log = LogFile( outFolder + '/' + options['log'], 'a' )
if not (os.path.exists( outFolder +'/templates' ) ):
print 'Current directory is not a valid modeling folder (missing /templates).'
_use( defaultOptions() )
if '?' in options or 'help' in options:
_use( defaultOptions() )
###################
## Aligner
##
## Create a sequence-structure alignment using T-coffee.
## Convert the alignment into Modeller compatible format
## input: sequences/nr.fasta
## templates/templates.fasta
## templates/t_cofee/*.alpha
##
## output: t_coffee/fast_pair.lib
## /final.score_html
## /struct.aln
## /t_coffee.log_*
## /final.aln
## /lalign_id_pair.lib
## /struct.aln_original
## /final.phylip
## /sap_pair.lib
## /t_coffee.inp
## /final.pir_aln (input for Modeller)
## /sap_pair.lib_original
## note 1: If there are more than approximately 50 sequences overall
## t_coffe will eat all the memory and the job will not finish
## This should be fixed in more recent versions of T-Coffee
## (v > 3.2) where T-Coffee, according to the manual "switches
## to a heuristic mode, named DPA, where DPA stands for Double
## Progressive Alignment."
## note 2: If there is only one template structure step 2 of T-coffee
## will not work. Solution, skip the structural alignment if
## only one template structure is provided.
## note 3: In quite some cases the sequence retrieved from the nrpdb
## sequence database is different from the sequence extracted
## from the coordinates in the pdb-file. This will sometimes
## cause t-coffee to terminate with an error (two sequences
## with the same name but with different sequences). Temporary
## solution: Choose another structure from the same cluster
## as the troublemaker.
try:
a = Aligner( outFolder, log, verbose=1, sap=sap )
a.align_for_modeller_inp()
a.go(host)
except:
    EHandler.error( 'Error while building alignments.')
print "\nalign.py -? or align.py -help for help screen"
|
gpl-3.0
| 5,412,043,434,244,115,000
| 31.446154
| 83
| 0.627786
| false
| 3.671018
| false
| false
| false
|
sopython/kesh
|
kesh/_database/creation/create_post_history.py
|
1
|
1880
|
from pymongo import MongoClient
from lxml import etree
from dateutil.parser import parse
import pickle
from time import gmtime, strftime
import os
import re
data_dir = '../../../bin/so_data_/'
file_name = 'PostHistory.xml'
db_name = 'kesh'
coll_name = 'post_history'
client = MongoClient()
db = client[db_name]
coll = db[coll_name]
context = etree.iterparse(os.path.join(data_dir, file_name),
events=('start', 'end'))
str_to_int = {'Id', 'PostHistoryTypeId', 'PostId', 'UserID'}
str_to_date = {'CreationDate'}
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
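# For example: convert('PostHistoryTypeId') -> 'post_history_type_id',
#              convert('Id') -> 'id'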
# Load in a set of python ids.
with open('question_ids.pickle', 'rb') as q, \
open('answer_ids.pickle', 'rb') as a:
question_ids = pickle.load(q)
answer_ids = pickle.load(a)
ids = question_ids | answer_ids
f = open(os.path.join(data_dir, './logs/{:s}.log'.format(coll_name)), 'w')
s = 'Importing {:s} data.\n\n'.format(coll_name)
f.write(s)
print(s, end='')
i = 0
for event, elem in context:
if event == 'end' and elem.tag == 'row':
# Create a dictionary and convert any necessary fields.
d = dict(elem.items())
if int(d['PostId']) in ids:
d = {convert(k):int(v) if k in str_to_int else
parse(v) if k in str_to_date else
v for k, v in d.items()}
coll.insert(d)
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
i += 1
if i % 10000 == 0:
s_option = (strftime('%H:%M:%S', gmtime()), d['id'], i)
s = '{:s} : Id - {:d} : # - {:d}\n'.format(*s_option)
print(s, end='')
f.write(s)
print('Creating indices.')
coll.ensure_index(convert('id'))
f.close()
|
bsd-3-clause
| -2,257,058,302,008,041,500
| 27.923077
| 74
| 0.551064
| false
| 3.047002
| false
| false
| false
|
lsbardel/python-stdnet
|
tests/all/query/related.py
|
1
|
6409
|
import datetime
from random import randint, uniform
from stdnet.utils import test
from examples.models import Node, Role, Profile, Dictionary
from examples.data import FinanceTest, Position, Instrument, Fund
def create(cls, root=None, nesting=None):
models = cls.mapper
if root is None:
with models.session().begin() as t:
root = t.add(models.node(weight=1.0))
yield t.on_result
yield create(cls, root, nesting=nesting)
elif nesting:
N = randint(2,9)
with models.session().begin() as t:
for n in range(N):
node = t.add(models.node(parent=root, weight=uniform(0,1)))
yield t.on_result
yield cls.multi_async((create(cls, node, nesting-1) for node\
in t.saved[node._meta]))
class TestSelfForeignKey(test.TestCase):
    '''The Node model is used only in this test class so that we can use its
    manager safely in a parallel test suite.'''
model = Node
nesting = 2
@classmethod
def after_setup(cls):
return create(cls, nesting=cls.nesting)
def test_meta(self):
all = yield self.query().load_related('parent').all()
for n in all:
if n.parent:
self.assertTrue(isinstance(n.parent, self.model))
def test_related_cache(self):
all = yield self.query().all()
pcache = self.model._meta.dfields['parent'].get_cache_name()
for n in all:
self.assertFalse(hasattr(n, pcache))
yield self.multi_async((n.parent for n in all))
for n in all:
self.assertTrue(hasattr(n, pcache))
self.assertEqual(getattr(n, pcache), n.parent)
def test_self_related(self):
query = self.query()
root = yield query.get(parent=None)
children = yield root.children.query().load_related('parent').all()
self.assertTrue(children)
for child in children:
self.assertEqual(child.parent, root)
children2 = yield child.children.query().load_related('parent').all()
self.assertTrue(children2)
for child2 in children2:
self.assertEqual(child2.parent, child)
def test_self_related_filter_on_self(self):
query = self.query()
# We should get the nodes just after the root
root = yield query.get(parent=None)
qs = yield query.filter(parent__parent=None).load_related('parent').all()
self.assertTrue(qs)
for node in qs:
self.assertEqual(node.parent, root)
class TestDeleteSelfRelated(test.TestWrite):
model = Node
nesting = 2
def setUp(self):
return create(self, nesting=self.nesting)
def test_related_delete_all(self):
all = yield self.query().all()
self.assertTrue(all)
root = 0
for a in all:
if a.parent is None:
root += 1
self.assertEqual(root, 1)
yield self.query().delete()
yield self.async.assertEqual(self.query().count(), 0)
def test_related_root_delete(self):
qs = self.query().filter(parent=None)
yield qs.delete()
yield self.async.assertEqual(self.query().count(), 0)
def test_related_filter_delete(self):
query = self.query()
root = yield query.get(parent=None)
self.assertFalse(root.parent)
qs = query.filter(parent=root)
yield qs.delete()
query = self.query()
yield self.async.assertEqual(query.count(), 1)
qs = yield query.all()
        self.assertEqual(qs[0], root)
class TestRealtedQuery(FinanceTest):
@classmethod
def after_setup(cls):
return cls.data.makePositions(cls)
def test_related_filter(self):
query = self.query(Position)
# fetch all position with EUR instruments
instruments = self.query(Instrument).filter(ccy='EUR')
peur1 = yield self.query(Position).filter(instrument=instruments)\
.load_related('instrument').all()
self.assertTrue(peur1)
for p in peur1:
self.assertEqual(p.instrument.ccy,'EUR')
peur = self.query(Position).filter(instrument__ccy='EUR')
qe = peur.construct()
self.assertEqual(qe._get_field, None)
self.assertEqual(len(qe),1)
self.assertEqual(qe.keyword, 'set')
peur = yield peur.all()
self.assertEqual(set(peur), set(peur1))
def test_related_exclude(self):
query = self.query(Position)
peur = yield query.exclude(instrument__ccy='EUR').load_related('instrument').all()
self.assertTrue(peur)
for p in peur:
self.assertNotEqual(p.instrument.ccy, 'EUR')
def test_load_related_model(self):
position = yield self.query(Position).get(id=1)
self.assertTrue(position.instrument_id)
cache = position.get_field('instrument').get_cache_name()
self.assertFalse(hasattr(position, cache))
instrument = yield position.load_related_model('instrument',
load_only=('ccy',))
self.assertTrue(isinstance(instrument, Instrument))
self.assertEqual(instrument._loadedfields, ('ccy',))
self.assertEqual(id(instrument), id(position.instrument))
def test_related_manager(self):
session = self.session()
fund = yield session.query(Fund).get(id=1)
positions1 = yield session.query(Position).filter(fund=fund).all()
positions = yield fund.positions.query().load_related('fund').all()
self.assertTrue(positions)
for p in positions:
self.assertEqual(p.fund, fund)
self.assertEqual(set(positions1), set(positions))
def test_related_manager_exclude(self):
inst = yield self.query().get(id=1)
fund = yield self.query(Fund).get(id=1)
pos = yield fund.positions.exclude(instrument=inst).load_related('instrument')\
.load_related('fund').all()
for p in pos:
self.assertNotEqual(p.instrument, inst)
self.assertEqual(p.fund, fund)
|
bsd-3-clause
| -5,532,104,236,342,709,000
| 36.7
| 90
| 0.591512
| false
| 4.040984
| true
| false
| false
|
nijel/weblate
|
setup.py
|
1
|
5078
|
#!/usr/bin/env python3
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
from distutils import log
from distutils.command.build import build
from distutils.core import Command
from distutils.dep_util import newer
from glob import glob
from itertools import chain
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py
from translate.tools.pocompile import convertmo
LOCALE_MASKS = [
"weblate/locale/*/LC_MESSAGES/*.po",
]
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open("README.rst") as readme:
README = readme.read()
with open("requirements.txt") as requirements:
REQUIRES = requirements.read().splitlines()
EXTRAS = {"all": []}
with open("requirements-optional.txt") as requirements:
section = None
for line in requirements:
line = line.strip()
if line.startswith("-r") or not line:
continue
if line.startswith("#"):
section = line[2:]
else:
dep = line.split(";")[0].strip()
EXTRAS[section] = dep
if section != "MySQL":
EXTRAS["all"].append(dep)
class WeblateBuildPy(build_py):
def find_package_modules(self, package, package_dir):
"""Filter settings.py from built module."""
result = super().find_package_modules(package, package_dir)
return [item for item in result if item[2] != "weblate/settings.py"]
class BuildMo(Command):
description = "update MO files to match PO"
user_options = []
def initialize_options(self):
self.build_base = None
def finalize_options(self):
self.set_undefined_options("build", ("build_base", "build_base"))
def run(self):
for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):
output = os.path.splitext(name)[0] + ".mo"
if not newer(name, output):
continue
self.announce(f"compiling {name} -> {output}", level=log.INFO)
with open(name, "rb") as pofile, open(output, "wb") as mofile:
convertmo(pofile, mofile, None)
class WeblateBuild(build):
"""Override the default build with new subcommands."""
# The build_mo has to be before build_data
sub_commands = [("build_mo", lambda self: True)] + build.sub_commands
setup(
name="Weblate",
version="4.5.2",
python_requires=">=3.6",
packages=find_packages(),
include_package_data=True,
description=(
"A web-based continuous localization system with "
"tight version control integration"
),
long_description=README,
long_description_content_type="text/x-rst",
license="GPLv3+",
keywords="i18n l10n gettext git mercurial translate",
url="https://weblate.org/",
download_url="https://weblate.org/download/",
project_urls={
"Issue Tracker": "https://github.com/WeblateOrg/weblate/issues",
"Documentation": "https://docs.weblate.org/",
"Source Code": "https://github.com/WeblateOrg/weblate",
"Twitter": "https://twitter.com/WeblateOrg",
},
author="Michal Čihař",
author_email="michal@cihar.com",
install_requires=REQUIRES,
zip_safe=False,
extras_require=EXTRAS,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Internationalization",
"Topic :: Software Development :: Localization",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
entry_points={"console_scripts": ["weblate = weblate.runner:main"]},
cmdclass={"build_py": WeblateBuildPy, "build_mo": BuildMo, "build": WeblateBuild},
)
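# With the cmdclass mapping above, "python setup.py build" runs the custom
# build_mo step first (compiling weblate/locale/*/LC_MESSAGES/*.po to .mo),
# and "python setup.py build_mo" can also be invoked on its own.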
|
gpl-3.0
| 7,299,572,486,727,412,000
| 34.229167
| 86
| 0.650108
| false
| 3.831571
| false
| false
| false
|
michael-hart/tactile_tablet
|
src/tablet.py
|
1
|
1195
|
# File created 26/01/2015
# Contains main method to write Braille output to a tablet
# Pins are [12, 16, 18, 22, 24, 26] in GPIO.BOARD mode
import RPi.GPIO as GPIO
import time
import atexit
from braille_converter import convert_string
from braille_dict import braille_dict as bdict
led_pins = [12, 16, 18, 22, 24, 26]
def main():
tablet_columns = 2
tablet_rows = 3
leftover_buffer = []
# Set up GPIO
GPIO.setmode(GPIO.BOARD)
for pin in led_pins:
GPIO.setup(pin, GPIO.OUT)
atexit.register(cleanup)
print "Enter sentences for Braille display"
while True:
display_str = raw_input('-> ')
word_buffer = convert_string(display_str)
word_buffer = leftover_buffer + word_buffer
        # leftover words were already folded into word_buffer above
        line_buffer, leftover_buffer = fit_to_screen(word_buffer, tablet_columns, tablet_rows)
# TODO: Output line_buffer to display
def fit_to_screen(words, cols, rows):
leftover = list(words)
lines = []
for i in range(rows):
lines.append([])
        while leftover and len(lines[i]) + len(leftover[0]) + 1 < cols:
lines[i] += leftover[0] + bdict[' ']
leftover = leftover[1:]
return lines, leftover
def cleanup():
print "Cleaning up..."
GPIO.cleanup()
if __name__ == '__main__':
main()
|
gpl-2.0
| -7,119,182,581,759,252,000
| 22.431373
| 105
| 0.687029
| false
| 2.94335
| false
| false
| false
|
zmr/namsel
|
edit_distance_tests/generate_accuracy_report.py
|
1
|
5481
|
#encoding: utf-8
import os
import sys
import glob
import re
import codecs
from difflib import HtmlDiff
from recognize import run_main
import Levenshtein as L
import requests
import datetime
import multiprocessing
from config_manager import Config, run_all_confs_for_page
LOGIN_URL = 'https://dhattupages.appspot.com/accounts/login/?next=/'
PW = 'dartsedolhagangdege7'
credentials = {'username':'zach', 'password':PW}
HD = HtmlDiff()
test_vols = ['sample_book6', 'sample_book5', 'sample_book4', 'sample_book3',
'sample_book2', 'sample_book1', 'ldong-yon-tan-rgya-mtsho',
'don-grub-rgyal', 'taranatha']
test_vols.sort()
style_old = ''' <style type="text/css">
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
</style>'''
style_new = ''' <style type="text/css">
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
tr {line-height: 40px;}
td {font-family: "Qomolangma-Uchen Sarchung" !important}
</style>'''
multiple_spaces = re.compile(ur'[ \t]{1,}')
pwd = os.getcwd()
def open(fl, mode):
return codecs.open(fl, mode, 'utf-8')
def _normalize_input(txt):
# Strip lines of extra whitespace
lines = txt.split('\n')
lines = [l.strip() for l in lines if l.strip()]
# remove top title line
lines = lines[1:]
txt = '\n'.join(lines)
# collapse multiple spaces to 1 space
txt = multiple_spaces.sub(' ', txt)
txt = txt.replace(u'༎', u'།།')
txt = txt.replace(u'<', u'〈')
txt = txt.replace(u'>', u'〉')
txt = txt.replace(u'༑', u'་།་')
txt = txt.replace(u'-', u'—')
return txt
def _make_html_diff(txt, ocr):
html = HD.make_file(txt.split('\n'), ocr.split('\n'))
html = html.replace(style_old, style_new)
html = html.replace('ISO-8859-1', 'utf-8')
html = html.replace('<tbody>\n', '<tbody>\n<tr><td></td><td></td><td>Manual input</td><td></td><td></td><td>OCR</td></tr>\n')
# print html
return html
def _get_compare_data(tif_txt_pair):
tif = tif_txt_pair[0]
txt = tif_txt_pair[1]
if tif[:-4] == txt[:-4]: # This should always be true
# ocr = run_main(tif, conf=Config(path='/home/zr/letters/conf/443cf9ec-76c7-44bc-95ad-593138d2d5fc.conf'), text=True)
# ocr = run_main(tif, conf=Config(segmenter='stochastic', recognizer='hmm', break_width=3.6), text=True)
ocr = run_main(tif, text=True)
# ocr = run_all_confs_for_page(tif, text = True)
ocr = ocr.strip()
txt = open(txt,'r').read()
txt = _normalize_input(txt)
edit_dist = L.distance(txt, ocr)
edit_ratio = L.ratio(txt, ocr)
html = _make_html_diff(txt, ocr)
# sys.exit()
data = {'edit_distance': edit_dist,
'edit_ratio': edit_ratio,
'filename': os.path.basename(tif),
'html': html
}
return data
def do_pairwise_comparison(origflpath, ocrflpath):
o = open(origflpath, 'r').read()
s = open(ocrflpath, 'r').read()
s = _normalize_input(s)
return L.ratio(o,s)
#data = {'csrfmiddlewaretoken':s.cookies['csrftoken'],
# 'edit_distance': edit_dist,
# 'filename': os.path.basename(tif),
# 'sample_set': t, 'html': html, 'timestamp': timestamp,
# 'comment': comment
# }
if __name__ == '__main__':
from sklearn.externals.joblib import Parallel, delayed
timestamp = datetime.datetime.now()
comment = raw_input('Comment: ')
for t in test_vols:
os.chdir(os.path.abspath(t))
tifs = glob.glob('*tif')
txts = glob.glob('*txt')
tifs.sort()
txts.sort()
pool = multiprocessing.Pool()
# all_data = Parallel(n_jobs=12)(delayed(_get_compare_data)(i) for i in zip(tifs, txts))
all_data = pool.map(_get_compare_data, zip(tifs, txts))
# all_data = []
# for i in zip(tifs, txts):
# all_data.append(_get_compare_data(i))
with requests.session() as s:
s.get(LOGIN_URL)
credentials['csrfmiddlewaretoken'] = s.cookies['csrftoken']
s.post(LOGIN_URL, data=credentials,
headers={'Referer': 'https://dhattupages.appspot.com/'},
allow_redirects=True)
print 'posting data for ', t
for data in all_data:
data['csrfmiddlewaretoken'] = s.cookies['csrftoken']
data['sample_set'] = t
data['timestamp'] = timestamp
data['comment'] = comment
r = s.post('https://dhattupages.appspot.com/test-data-update',
headers={'Referer': 'https://dhattupages.appspot.com/'},
data=data)
r.raise_for_status()
os.chdir(pwd)
|
mit
| 8,027,859,567,957,796,000
| 31.903614
| 129
| 0.564366
| false
| 3.171312
| false
| false
| false
|
rafin/Spotify-Visualizations
|
engine/views.py
|
1
|
3163
|
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from urllib import unquote
#spotify tools
from spot import pl
from spot import keys
import json
from json import loads as dict #converts json back to dictionary
#generate serializer for retrieving db data
from django.core import serializers
json_serializer = serializers.get_serializer("json")()
import models
from django.views.decorators.csrf import ensure_csrf_cookie
@ensure_csrf_cookie
def index(request):
s_auth_url = keys.auth_url(1)
p_auth_url = keys.auth_url(0)
return render_to_response('index.html', {'p_auth_url': p_auth_url, 's_auth_url': s_auth_url})
def plot(request, token, username):
return render_to_response('plot.html', {'token': token, 'name': username})
def sift(request, token, username):
return render_to_response('sift.html', {'token': token, 'name': username})
def getsongs(request):
'''returns json response of given playlist title'''
username = request.GET.get('username', '')
title = unquote(request.GET.get('title', ''))
token = request.GET.get('token','')
#if title is a list of titles instead of just 1
if '~[' in title:
titles = title.split('~[')
songs = []
for title in titles:
songs += pl.pl_data(title, username, token)['songs']
songs = {"songs":songs}
else:
songs = pl.pl_data(title, username, token)
#json_songs = json_serializer.serialize(songs, ensure_ascii=True)
return JsonResponse(songs, safe=False )
def getplaylists(request):
'''returns json response of given playlist title'''
#playlists = models.Playlist.objects.all()
username = request.GET.get('username', '')
token = request.GET.get('token', '')
playlists = pl.get_playlists(username, token)
#json_playlists = json_serializer.serialize(playlists, ensure_ascii=True)
return JsonResponse(playlists, safe=False)
def newplaylist(request):
if request.is_ajax():
if request.method == 'POST':
title = request.POST.get("title","")
songs = request.POST.get("songs","")
songs = songs[1:-1]
songs = songs.replace('"', '')
#reauthorize and get username
token = request.POST.get("token","")
sp = keys.get_access(token)
username = sp.current_user()['id']
pl.new_playlist(title, songs)
return JsonResponse({"success":"yes"})
def authorize_plot(request):
code = request.GET.get('code', '')
token = keys.get_token(code, 0)
#get username
sp = keys.get_access(token)
username = sp.current_user()['id']
url = reverse('plot', args=(), kwargs={'token': token, 'username': username})
return HttpResponseRedirect(url)
def authorize_sift(request):
code = request.GET.get('code', '')
token = keys.get_token(code, 1)
#get username
sp = keys.get_access(token)
username = sp.current_user()['id']
url = reverse('sift', args=(), kwargs={'token': token, 'username': username})
return HttpResponseRedirect(url)
|
mit
| -6,362,495,189,862,798,000
| 33.010753
| 97
| 0.654758
| false
| 3.690782
| false
| false
| false
|
trthanhquang/bus-assistant
|
webApp/getBusTiming.py
|
1
|
2827
|
#!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup as BS
import re
import time
def getAgenciesList():
agenciesList_req = urllib2.Request('''http://services.my511.org/Transit2.0/GetAgencies.aspx?token=aeeb38de-5385-482a-abde-692dfb2769e3''')
xml_resp = urllib2.urlopen(agenciesList_req)
soup = BS(xml_resp.read(),'lxml')
print soup.prettify()
agencies = soup.find_all('agency')
for a in agencies:
print a['name']
def getBusList(busCodes):
api_url = '''http://services.my511.org/Transit2.0/GetRoutesForAgencies.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&agencyNames=SF-MUNI'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
routes = soup.find_all('route')
for route in routes:
if route['code'] in busCodes:
print route.prettify()
def getBusStopsList():
api_url = '''http://services.my511.org/Transit2.0/GetStopsForRoute.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&routeIDF=SF-MUNI~8X~Inbound'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
print soup.prettify()
def getNextDepartures(stopcode,buscode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
# print soup.prettify()
route = soup.find('route',{'code':buscode})
l = route.departuretimelist.getText().split()
if l:
print '-- %s\t%s (mins)'%(buscode,', '.join(l))
else:
print '-- %s\tUnavailable'%buscode
return l
class busTime:
    def __init__(self, busCode, busTime=None):
        self.busCode = busCode #String
        self.busTime = busTime if busTime is not None else [] #List of String
def __str__(self):
return self.busCode
class busStopStatus:
    def __init__(self, stopcode, description="", departureList=None):
        self.stopcode = stopcode
        self.description = description
        self.departureList = departureList if departureList is not None else []
def getBusStopStatus(stopcode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
description = soup.find('stop')['name']
status = busStopStatus(stopcode,description,[])
for bus in soup.find_all('route'):
departtime = busTime(bus['code'],[])
timeList = bus.departuretimelist.getText().split()
if timeList:
print '-- %s\t%s (mins)'%(bus['code'],', '.join(timeList))
for t in timeList:
departtime.busTime.append(t)
status.departureList.append(departtime)
else:
print '-- %s\tUnavailable'%bus['code']
return status
if __name__ == '__main__':
print 'BUS TIMING... :D\n'
print time.ctime(time.time())
getBusStopStatus(16367)
|
mit
| 6,510,849,392,254,107,000
| 27
| 139
| 0.694022
| false
| 2.664467
| false
| false
| false
|
NYUEcon/NYUecondata
|
psid/psid.py
|
1
|
12753
|
"""
Working with PSID in python
@author : Spencer Lyon <spencer.lyon@stern.nyu.edu>
@date : 2015-02-04 09:02:56
use the read_csv option `usecols` to only keep what we need
"""
import re
import os
import gc
import os.path
import zipfile
import requests
import lxml.html
import numpy as np
import pandas as pd
# ----------- #
# Downloading #
# ----------- #
# Define lookup that maps years into request numbers.
file_year = map(str, list(range(1968, 1998)) + list(range(1999, 2012, 2)))
request_numbers = map(str, ([1056] + list(range(1058, 1083)) +
list(range(1047, 1052)) +
[1040, 1052, 1132, 1139, 1152, 1156]))
file_lookup = dict(zip(file_year, request_numbers))
file_lookup["ind"] = "1053"
def start_psid_session(user=None, password=None):
"""
Use user supplied login details to log in to umich site for PSID
download
"""
login_url = "http://simba.isr.umich.edu/u/Login.aspx"
# start html session so we can log in
session = requests.session()
start = session.get(login_url)
html = start.text
root = lxml.html.fromstring(html)
# Stuff so we can log in
EVAL = root.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
VIEWSTATE = root.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']
acc_pwd = {'ctl00$ContentPlaceHolder1$Login1$UserName': user,
'ctl00$ContentPlaceHolder1$Login1$Password': password,
'ctl00$ContentPlaceHolder1$Login1$LoginButton': 'Log In',
'__EVENTTARGET': '',
'__EVENTARGUMENT': '',
'__VIEWSTATE': VIEWSTATE,
'__EVENTVALIDATION': EVAL}
# Send login message to PSID site
session.post(login_url, data=acc_pwd)
# Check for login
z = session.get('http://simba.isr.umich.edu/data/data.aspx')
tf2 = 'Logout' in str(z.content)
print('Successful login: %s' % (tf2))
return session
# Function to download PSID zip file
def download_psid(number, local_filename, session):
"""
Download a zip file form the PSID and save to local_filename
"""
request_start = 'http://simba.isr.umich.edu/Zips/GetFile.aspx?file='
# Get the file using requests
r = session.get(request_start + number, stream=True)
with open(local_filename, 'wb') as f:
        # Write it out in chunks in case it's big
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return local_filename
# Extracting PSID using psid_unzip.
def psid_unzip(filename, extractall=False):
zfile = zipfile.ZipFile(filename)
def keep_file(n):
if extractall:
return True
else:
return ".sas" in name or ".txt" in name or ".pdf" in name
for name in zfile.namelist():
# Only take out the files we want
if keep_file(name):
(dirname, filename) = os.path.split(name)
if ".pdf" in name: # Different directory for Codebooks
dirname = dirname + "Codebooks"
if ".txt" in name:
nascii = name # Keep track of ascii name
if ".sas" in name:
nsas = name # Keep track of sas name
print("Decompressing %s on %s" % (filename, dirname))
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
zfile.extract(name, dirname) # Extract file
return (nsas, nascii)
def sascii2csv(sas_name, ascii_name, csv_name, remove_orig=True):
"""
Read in ascii data from SAS commands and write out csv
"""
# Open sas file
x = open(sas_name, "r")
dat = x.read()
dat_split = dat.split('\n')
# RE for variable designation
re_var = "^\s*(?P<variable>\S+)\s+"
# RE for variable label
re_label = '[(LABEL)(label)]\s*=\s*"(?P<label>[^"]+)"'
# RE for variable format
re_format = "[(FORMAT)(format)]\s*=\s*(?P<format>\S+)\s"
# RE for variable position
re_length = "\s*(?P<length1>\d*)\s*-\s*(?P<length2>\d*)\s*"
meta = []
for dstr in dat_split:
res_var = re.search(re_var, dstr) # Find variable name in line
res_label = re.search(re_label, dstr) # Find variable label
res_format = re.search(re_format, dstr) # Find variable format
if not (res_var is None or res_label is None or res_format is None):
# Now that we have a verified variable name...
# Find position RE
counts = re.search(res_var.group("variable")+re_length, dat)
l1 = int(counts.group("length1")) # Grab out first position
l2 = int(counts.group("length2")) # Grab out second position
# Add to meta data
meta += [{"variable": res_var.group("variable"),
"label": res_label.group("label"),
"format": res_format.group("format"),
"l1": l1,
"l2": l2,
"l3": l2 - l1 + 1}]
# Get relevant descriptions
names = [z["label"] for z in meta]
lengths = [z["l3"] for z in meta]
del meta
# Use numpy to read fixed width file and write as .csv
data = np.genfromtxt(ascii_name, names=names, delimiter=lengths)
np.savetxt(csv_name, data, delimiter=',',
header=','.join(data.dtype.names))
del data
if remove_orig:
os.remove(sas_name)
os.remove(ascii_name)
def download_unzip_csv_psid(f_name, request_num, session, to_csv=True,
remove_orig=True, verbose=True):
"""
Download a family data set
"""
# Download zip file
if verbose:
print("Downloading %s" % f_name)
x = download_psid(str(request_num), f_name, session)
# Unzip
if verbose:
print("Unzipping %s" % f_name)
sas_name, ascii_name = psid_unzip(f_name)
if to_csv:
if verbose:
print("Converting %s to csv" % ascii_name)
# generate csv_name and convert to csv
csv_name = f_name.strip(".zip") + ".csv"
sascii2csv(sas_name, ascii_name, csv_name, remove_orig=remove_orig)
if remove_orig:
os.remove(f_name)
gc.collect()
def download_all_family_data(session, to_csv=True, **kwargs):
"""
Download all family data sets
"""
    family_lookup = {k: v for k, v in file_lookup.items() if k != "ind"}
    for (fy, rn) in family_lookup.items():
fn = "FAM" + fy + ".zip"
download_unzip_csv_psid(fn, rn, session, to_csv=to_csv, **kwargs)
return
def download_ind_cross_year(session, to_csv=True, **kwargs):
"""
Download the cross year individual file
"""
download_unzip_csv_psid("IND2011ER.zip", str(1053), session,
to_csv=to_csv, **kwargs)
return
def download_parentfile(session, to_csv=True, **kwargs):
"""
    Download the parent identification file
"""
download_unzip_csv_psid("PID2011ER.zip", str(1123), session,
to_csv=to_csv, **kwargs)
return
def download_all_data(session, to_csv=True, **kwargs):
"""
Call the download ind and download all family functions
"""
download_ind_cross_year(session, to_csv=True, **kwargs)
download_all_family_data(session, to_csv=True, **kwargs)
return
# -------- #
# Cleaning #
# -------- #
def clean_indfile_names(df):
"""
Most of the columns in the PSID individual file have many
    underscores between the variable name and the year. The next few
    lines strip those underscores and re-assign the column names.
    This is necessary so that the data can be saved to HDF in table format.
"""
cols = pd.Series(df.columns, dtype=str)
c2 = cols.str.extract("(.+?)__+(\d\d)")
cols2 = c2[0] + c2[1]
cols2 = cols2.fillna(cols)
    df.columns = cols2
return df
def csv2hdf(csv_fn, hdf_fn, hdf_gn=None, hdf_mode="a",
extra_func=None):
"""
Move the file csv_fn to an HDF file.
Parameters
----------
csv_fn : string
The file name for the csv
hdf_fn: string
The name of the hdf file to write to
hdf_gn: string, optional
A string specifying the `path` to the group to contain the
dataset. If none is given, the data set is saved to `/fn`, where
fn is the root of csv_fn
hdf_mode: string, optional(default="a")
The open mode for the hdf file. Default is append
extra_func: function, optional(default=None)
An extra function the user can supply to clean or otherwise
alter the data set after reading in from csv, but before saving
to hdf
Returns
-------
None
Notes
-----
This function tries to write the data set in table form, but if it
cannot it will fallback to writing in fixed form.
For a discussion on the differences see the pandas manual
"""
df = pd.read_csv(csv_fn)
if extra_func is not None:
df = extra_func(df)
if hdf_gn is None:
# split to path/file then chop last 4 characters off (`.csv`)
hdf_gn = os.path.split(csv_fn)[1][:-4]
try:
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="table",
complib="blosc")
print("Added %s to %s" % (hdf_gn, hdf_fn))
except:
print("WARN: Couldn't store %s as table. Using fixed" % hdf_gn)
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="fixed",
complib="blosc")
return
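# Example calls (mirroring what the __main__ block below does; the csv names
# are whatever the download step produced):
#     csv2hdf("FAM1975.csv", "PSID.hdf")
#     csv2hdf("IND2011ER.csv", "PSID.hdf", extra_func=clean_indfile_names)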
def _convert_to_4_digit_year(yr):
print("recieved yr: %s" % yr)
if len(yr) == 4:
return yr
if len(yr) == 1:
return "200" + yr
if len(yr) == 3:
raise ValueError("Can't parse three digit year")
iy = int(yr)
if 0 <= iy <= 9: # 200x
return "20" + yr
elif 10 < iy <= int(str(datetime.datetime.now().year)[2:]):
return "20" + yr
else: # assuming in 1900's
return "19" + yr
if __name__ == '__main__':
import glob
import argparse
import datetime
from textwrap import dedent
d_help = dedent("""\
Download the specified data file. If argument begins with a, all files
will be downloaded. If it begins with i, only the cross-year individual
file will be downloaded. If it is of the form fYY or fYYYY then only the
family file for the given year will be downloaded
""")
# create parser and add arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--download",
help=d_help)
parser.add_argument("--hdf",
help="Convert csv files to hdf named PSID.hdf",
action="store_true")
parser.add_argument("-u", "--username",
help="Specify username for PSID website")
parser.add_argument("-p", "--password",
help="Specify password for PSID website")
args = parser.parse_args()
# Handle download arg
if args.download:
# make sure we have a user_name and password
if args.username is None or args.password is None:
msg = dedent("""\
Must supply username and password. Example syntax:
`python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`
If you don't yet have an account, go to http://simba.isr.umich.edu
and create one
""")
raise ValueError(msg)
a = args.download
session = start_psid_session(user=args.username,
password=args.password)
if a.startswith("a"): # download all
download_all_data(session)
elif a.startswith("i"): # download individual file
download_ind_cross_year(session, to_csv=True)
elif a.startswith("p"): # download parent id file
download_parentfile(session, to_csv=True)
else:
# download single family file
m = re.match("f?(\d+)", a.lower())
if m is not None:
yr = m.groups()[0]
yr = _convert_to_4_digit_year(yr)
rn = file_lookup[yr]
fn = "FAM" + yr + ".zip"
download_unzip_csv_psid(fn, rn, session, to_csv=True)
else:
raise ValueError("Could not parse download option")
# Handle hdf arg
if args.hdf:
fnames = glob.glob("./*.csv") # get csv file names.
fnames.sort(reverse=True) # Sorting to put IND file at top
for f in fnames:
if f.lower().startswith("ind"):
csv2hdf(f, "PSID.hdf", extra_func=clean_indfile_names)
else:
csv2hdf(f, "PSID.hdf")
|
mit
| -7,739,318,489,776,719,000
| 28.183066
| 78
| 0.57312
| false
| 3.605598
| false
| false
| false
|
odoo-brazil/PySPED
|
pysped/cte/leiaute/consrecicte_300.py
|
1
|
8951
|
# -*- coding: utf-8 -*-
from pysped.xml_sped import *
from pysped.cte.leiaute import ESQUEMA_ATUAL_VERSAO_300 as ESQUEMA_ATUAL
import os
from .cte_300 import CTe
DIRNAME = os.path.dirname(__file__)
class ConsReciCTe(XMLNFe):
def __init__(self):
super(ConsReciCTe, self).__init__()
self.versao = TagDecimal(nome='consReciCTe', codigo='BP02', propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='BP03', tamanho=[1, 1, 1] , raiz='//consReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nRec = TagCaracter(nome='nRec' , codigo='BP04', tamanho=[1, 15, 1] , raiz='//consReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'consReciCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.nRec.xml
xml += '</consReciCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.nRec.xml = arquivo
return self.xml
xml = property(get_xml, set_xml)
class InfProt(XMLNFe):
def __init__(self):
super(InfProt, self).__init__()
self.Id = TagCaracter(nome='infProt' , codigo='PR04', propriedade='Id' , raiz='/' , obrigatorio=False, namespace=NAMESPACE_CTE)
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='PR05', tamanho=[1, 1, 1], raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.verAplic = TagCaracter(nome='verAplic', codigo='PR06', tamanho=[1, 20] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.chCTe = TagCaracter(nome='chCTe' , codigo='PR07', tamanho=[44, 44] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.dhRecbto = TagDataHora(nome='dhRecbto', codigo='PR08' , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nProt = TagCaracter(nome='nProt' , codigo='PR09', tamanho=[15, 15] , raiz='//infProt', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.digVal = TagCaracter(nome='digVal' , codigo='PR10', tamanho=[28, 28] , raiz='//infProt', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cStat = TagCaracter(nome='cStat' , codigo='PR11' , tamanho=[1, 3] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='PR12' , tamanho=[1, 255] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
if self.Id.valor:
xml += self.Id.xml
else:
xml += '<infProt>'
xml += self.tpAmb.xml
xml += self.verAplic.xml
xml += self.chCTe.xml
xml += self.dhRecbto.xml
xml += self.nProt.xml
xml += self.digVal.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += '</infProt>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Id.xml = arquivo
self.tpAmb.xml = arquivo
self.verAplic.xml = arquivo
self.chCTe.xml = arquivo
self.dhRecbto.xml = arquivo
self.nProt.xml = arquivo
self.digVal.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
xml = property(get_xml, set_xml)
class ProtCTe(XMLNFe):
def __init__(self):
super(ProtCTe, self).__init__()
self.versao = TagDecimal(nome='protCTe', codigo='PR02' , propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.infProt = InfProt()
self.Signature = Signature()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.versao.xml
xml += self.infProt.xml
if len(self.Signature.URI) and (self.Signature.URI.strip() != '#'):
xml += self.Signature.xml
xml += '</protCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
#
# the infProt group is also used by the webservice that queries the status of a CT-e,
# so its root cannot be assumed to always be the
# protCTe group
#
self.infProt.xml = self._le_noh('//protCTe/infProt', ns=NAMESPACE_CTE)
self.Signature.xml = self._le_noh('//protCTe/sig:Signature', ns=NAMESPACE_CTE)
xml = property(get_xml, set_xml)
def protocolo_formatado(self):
if not self.infProt.nProt.valor:
return ''
formatado = self.infProt.nProt.valor
formatado += ' - '
formatado += self.infProt.dhRecbto.formato_danfe()
return formatado
class RetConsReciCTe(XMLNFe):
def __init__(self):
super(RetConsReciCTe, self).__init__()
self.versao = TagDecimal(nome='retConsReciCTe', codigo='BR02' , propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='BR03' , tamanho=[1, 1, 1], raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.verAplic = TagCaracter(nome='verAplic' , codigo='BR04' , tamanho=[1, 20] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nRec = TagCaracter(nome='nRec' , codigo='BR04a', tamanho=[1, 15, 1] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cStat = TagCaracter(nome='cStat' , codigo='BR05' , tamanho=[1, 3] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='BR06' , tamanho=[1, 255] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cUF = TagCaracter(nome='cUF' , codigo='BR06a', tamanho=[2, 2, 2], raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.protCTe = []
#
# Dictionary of protocols, keyed by the CT-e access key
#
self.dic_protCTe = {}
#
# Dictionary of processes (CT-e + protocol), keyed by the CT-e access key
#
self.dic_procCTe = {}
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'retConsReciCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.verAplic.xml
xml += self.nRec.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += self.cUF.xml
for pn in self.protCTe:
xml += pn.xml
xml += '</retConsReciCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.verAplic.xml = arquivo
self.nRec.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
self.cUF.xml = arquivo
self.protCTe = self.le_grupo('//retConsReciCTe/protCTe', ProtCTe, sigla_ns='cte')
#
# Build the dictionary of protocols
#
for pn in self.protCTe:
self.dic_protCTe[pn.infProt.chCTe.valor] = pn
xml = property(get_xml, set_xml)
class ProcCTe(XMLNFe):
def __init__(self):
super(ProcCTe, self).__init__()
self.versao = TagDecimal(nome='cteProc', propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.CTe = CTe()
self.protCTe = ProtCTe()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'procCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.CTe.xml.replace(ABERTURA, '')
xml += self.protCTe.xml.replace(ABERTURA, '')
xml += '</cteProc>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CTe.xml = arquivo
self.protCTe.xml = self._le_noh('//cteProc/protCTe', ns=NAMESPACE_CTE)
xml = property(get_xml, set_xml)
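# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the file name below is
# hypothetical). It assumes, as implemented in set_xml() above, that these
# classes accept raw XML through their ``xml`` property.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    retorno = RetConsReciCTe()
    retorno.xml = open('retConsReciCTe-exemplo.xml').read()
    # dic_protCTe is keyed by the CT-e access key after parsing
    for chave, protocolo in retorno.dic_protCTe.items():
        print(protocolo.protocolo_formatado())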
|
lgpl-2.1
| 8,531,728,794,076,872,000
| 40.985915
| 180
| 0.587499
| false
| 2.901687
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/network_watcher.py
|
1
|
2066
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkWatcher(Resource):
"""Network watcher in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:ivar provisioning_state: The provisioning state of the resource. Possible
values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.network.v2016_09_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkWatcher, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.provisioning_state = None
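# Minimal usage sketch (illustrative values; not part of the generated SDK code).
# Only writable properties are passed; server-populated fields remain None until
# the object is returned by the service.
if __name__ == '__main__':
    watcher = NetworkWatcher(location='westeurope', tags={'env': 'dev'})
    print(watcher.location, watcher.provisioning_state)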
|
mit
| 8,103,589,322,604,929,000
| 34.016949
| 85
| 0.575508
| false
| 4.148594
| false
| false
| false
|
RichardLMR/xls2txtISA.NANO.archive
|
xls2txtISA.NANO.archive.py
|
1
|
9430
|
'''
xls2txtISA.NANO.archive.py
***********************
The research leading to the development of this program has received funding from the European Union Seventh Framework Programme (FP7/2007-2013) under grant agreement number 309837 (NanoPUZZLES project).
http://wwww.nanopuzzles.eu
************************
######################
#License information##
######################
Copyright (c) 2015 Liverpool John Moores University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
THIS PROGRAM IS MADE AVAILABLE FOR DISTRIBUTION WITHOUT ANY FORM OF WARRANTY TO THE
EXTENT PERMITTED BY APPLICABLE LAW. THE COPYRIGHT HOLDER PROVIDES THE PROGRAM \"AS IS\"
WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM LIES
WITH THE USER. SHOULD THE PROGRAM PROVE DEFECTIVE IN ANY WAY, THE USER ASSUMES THE
COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. THE COPYRIGHT HOLDER IS NOT
RESPONSIBLE FOR ANY AMENDMENT, MODIFICATION OR OTHER ENHANCEMENT MADE TO THE PROGRAM
BY ANY USER WHO REDISTRIBUTES THE PROGRAM SO AMENDED, MODIFIED OR ENHANCED.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL THE
COPYRIGHT HOLDER BE LIABLE TO ANY USER FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
OR LOSSES SUSTAINED BY THE USER OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO
OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
####################
See also: http://www.gnu.org/licenses/ (last accessed 14/01/2013)
Contact:
1. R.L.MarcheseRobinson@ljmu.ac.uk
or if this fails
2. rmarcheserobinson@gmail.com
#####################
########
Purpose#
########
To convert a compressed, *flat* archive ("yyyy.zip") populated with ISA-TAB-Nano based ".xls" files, to a corresponding compressed, *flat* archive ("yyyy-txt.zip") of ISA-TAB-Nano based tab delimited text (".txt") files.
N.B. ISA-TAB-Nano is described here: https://wiki.nci.nih.gov/display/ICR/ISA-TAB-Nano
DISCLAIMER: No endorsements from the original ISA-TAB-Nano developers or any other third party organisations should be inferred.
########
Usage #
########
python xls2txtISA.NANO.archive.py -i <absolute name of zip file containing ISA-TAB-Nano files in ".xls" format>
e.g.
python xls2txtISA.NANO.archive.py -i "C:\Work\Investigation.ID.zip"
Options:
-a : modify "Term Accession Numbers" = TRUE (default:FALSE). N.B. If this is set to TRUE, http://purl.bioontology.org/ontology/npo#NPO_1915 would be converted to NPO_1915 etc. This may be required by some ISA-TAB-Nano software programs.
-c : remove all "Comment" rows from the Investigation file. Some ISA-TAB-Nano software programs may not accept these rows.
-N: edit certain fields (or field entries) to be consistent with the latest version of the NanoPUZZLES ISA-TAB-Nano Excel templates
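e.g. (illustrative) combining the optional switches:
python xls2txtISA.NANO.archive.py -i "C:\Work\Investigation.ID.zip" -a -c -N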
'''
###########################
#######Imports#############
import sys,re,glob,getopt,shutil,os
dir_of_this_file = re.sub('(xls2txtISA\.NANO\.archive\.py)','',os.path.abspath(__file__))
sys.path.append(r'%sutils' % dir_of_this_file)
from zipper import zipUtils
from os_ind_delimiter import delimiter
from xls2txt import changeXls2txt
from fixTxtContents import fixContents
###########################
##########################
########Globals###########
#*************************************
#Fixed
#*************************************
fileNameRegexesDict = {}
fileNameRegexesDict['input_extension'] = re.compile('(\.xls$)')
fileNameRegexesDict['Investigation'] = re.compile('(i_)')
fileNameRegexesDict['Study'] = re.compile('(s_)')
fileNameRegexesDict['Material'] = re.compile('(m_)')
fileNameRegexesDict['Assay'] = re.compile('(a_)')
all_file_types = [key for key in fileNameRegexesDict.keys() if not 'input_extension' == key]
del key
##########################
def extractXlsFolder(xls_archive):
instOfzipUtils = zipUtils(delimiter())
sub_folder_count = instOfzipUtils.archive2folder(xls_archive)
assert 0 == sub_folder_count
return instOfzipUtils.folder_name
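# Collect the absolute paths of all .xls files in the extracted folder and check
# that at least one file of each expected ISA-TAB-Nano type (i_/s_/m_/a_) is present.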
def idInputFiles(xls_folder):
instOfzipUtils = zipUtils(delimiter())
input_files = instOfzipUtils.getRelativeFileNames(xls_folder)
input_files = [r'%s%s%s' % (xls_folder,delimiter(),file) for file in input_files if fileNameRegexesDict['input_extension'].search(file)]
del file
del instOfzipUtils
#non_xls_files = [file_name for file_name in input_files if not fileNameRegexesDict['input_extension'].search(file_name)]
#del file_name
#assert 0 == len(non_xls_files),"There are %d non-xls files in the folder %s created from the input archive." % (len(non_xls_files),xls_folder)
#del non_xls_files
fileType2No = {}
for fileType in all_file_types:
fileType2No[fileType] = len([file_name for file_name in input_files if fileNameRegexesDict[fileType].match(file_name.split(delimiter())[-1])])
assert not 0 == fileType2No[fileType], "Zero %s input files in the folder created from the input archive!" % fileType
print "%d %s input files in the folder created from the input archive!" % (fileType2No[fileType],fileType)
del fileType2No
return input_files#,non_xls_files #non_xls_files should just be copied across to the final zip archive without modification -see "def createFlatTxtArchive(xls_folder):"
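# Convert every .xls input file to tab-delimited text, then post-process the
# resulting .txt contents according to its ISA-TAB-Nano file type and the
# selected command line options.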
def createAllTxt(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges):
abs_name_input_files = idInputFiles(xls_folder)
for xls_file in abs_name_input_files:
txt_file = changeXls2txt(xls_file,fileNameRegexesDict['input_extension'])
applicable_standard_file_types = [ft for ft in all_file_types if fileNameRegexesDict[ft].match(txt_file.split(delimiter())[-1])]
del ft
assert 1 >= len(applicable_standard_file_types),"txt_file=%s,applicable_standard_file_types=%s" % (txt_file,str(applicable_standard_file_types))
if 1 == len(applicable_standard_file_types):
current_file_type = applicable_standard_file_types[0]
else:
assert 0 == len(applicable_standard_file_types),"txt_file=%s,applicable_standard_file_types=%s" % (txt_file,str(applicable_standard_file_types))
current_file_type = 'NonStandard'
del applicable_standard_file_types
fixContents(input_file=txt_file,out_name=None,del_intermediates=True,file_type=current_file_type,shouldEditAccessionCodes=mustEditAccessionCodes,shouldRemoveComments=mustRemoveComments,shouldMakeNanoPUZZLESspecificChanges=mustMakeNanoPUZZLESspecificChanges)
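# Remove the original .xls files and pack the converted .txt files into a flat
# "-txt.zip" archive; any non-default options are recorded in the archive name.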
def createFlatTxtArchive(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges):
flat_txt_archive = xls_folder+"-txt.zip"
###########
#Rename the output file if non-default options are used
if mustEditAccessionCodes:
flat_txt_archive = re.sub('(\.zip$)','_opt-a.zip',flat_txt_archive)
if mustRemoveComments:
flat_txt_archive = re.sub('(\.zip$)','_opt-c.zip',flat_txt_archive)
if mustMakeNanoPUZZLESspecificChanges:
flat_txt_archive = re.sub('(\.zip$)','_opt-N.zip',flat_txt_archive)
###########
cwd = os.getcwd()
os.chdir(xls_folder)
for xls_file in glob.glob('*.xls'):
os.remove(xls_file)
os.chdir(cwd)
del cwd,xls_file
instOfzipUtils = zipUtils(delimiter_value=delimiter())
instOfzipUtils.filesIntoFlatArchive(folder_name=xls_folder,zip_archive=flat_txt_archive)
del instOfzipUtils
def cleanUp(folder_list):
for folder in folder_list:
cwd = os.getcwd()
os.chdir(folder)
for file in glob.glob('*'):
os.remove(file)
os.chdir(cwd)
os.rmdir(folder)
def main():
#######################
#**********************
#These Boolean variables can be changed from their default values using command line switches
#**********************
mustEditAccessionCodes = False
mustRemoveComments = False
mustMakeNanoPUZZLESspecificChanges = False
#######################
print '-'*50
try:
#############
opts,args = getopt.getopt(sys.argv[1:],'Ncai:',['mustMakeNanoPUZZLESspecificChanges=True','mustRemoveComments=True','mustEditAccessionCodes=True','input='])
for o,v in opts:
if '-i' == o:
xls_archive = r'%s' % re.sub('"','',v)
if '-a' == o:
mustEditAccessionCodes = True
if '-c' == o:
mustRemoveComments = True
if '-N' == o:
mustMakeNanoPUZZLESspecificChanges = True
del o,v,opts,args
#############
except Exception:
print __doc__
sys.exit(1)
print 'Converting:', xls_archive
xls_folder = extractXlsFolder(xls_archive)
createAllTxt(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges)
createFlatTxtArchive(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges)
cleanUp([xls_folder])
print xls_archive, " CONVERTED SUCCESSFULLY"
print '-'*50
return 0
if __name__ == '__main__':
sys.exit(main())
|
gpl-2.0
| -1,746,001,864,893,949,200
| 37.178138
| 259
| 0.714952
| false
| 3.24055
| false
| false
| false
|
rmelo19/rmelo19-arduino
|
fritzing/fritzing.0.9.2b.64.pc/parts/part-gen-scripts/misc_scripts/findfonts.py
|
1
|
2245
|
# usage:
# findfonts.py -d <directory> -f [font1] -f [font2] ....
#
# <directory> is a folder, with subfolders, containing .svg files. In each svg file in the directory or its children
# look for fonts that aren't in the list
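# example invocation (directory and font names are hypothetical):
#   findfonts.py -d parts/svg -f DroidSans -f OCRA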
import getopt, sys, os, re
def usage():
print """
usage:
findfonts.py -d [directory] -f [font1] -f [font2] ...
directory is a folder containing .svg files.
In each svg file in the directory or its subfolders,
look for fonts that aren't in the list
"""
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:f:", ["help", "directory", "font"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
outputDir = None
fonts = []
for o, a in opts:
#print o
#print a
if o in ("-d", "--directory"):
outputDir = a
elif o in ("-h", "--help"):
usage()
sys.exit(2)
elif o in ("-f", "--font"):
fonts.append(a);
else:
assert False, "unhandled option"
if(not(outputDir)):
usage()
sys.exit(2)
for root, dirs, files in os.walk(outputDir, topdown=False):
for filename in files:
if (filename.endswith(".svg")):
infile = open(os.path.join(root, filename), "r")
svg = infile.read();
infile.close();
matches = re.findall('font-family\\s*=\\s*\"(.+)\"', svg)
listMatches(matches, fonts, root, filename);
matches = re.findall('font-family\\s*:\\s*(.+)[\\";]', svg)
listMatches(matches, fonts, root, filename);
def listMatches(matches, fonts, root, filename):
for m in matches:
gotone = 0
for fontname in fonts:
if (m.find(fontname) >= 0):
gotone = 1;
break;
if not gotone:
print "{0}::{1}".format(os.path.join(root, filename), m)
if __name__ == "__main__":
main()
|
gpl-3.0
| -7,906,983,742,202,827,000
| 27.782051
| 120
| 0.498441
| false
| 3.864028
| false
| false
| false
|
peterheim1/robbie_ros
|
robbie_moveit/nodes/pick_test1.py
|
1
|
4531
|
#!/usr/bin/env python
import sys
import rospy
from moveit_commander import RobotCommander, MoveGroupCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from geometry_msgs.msg import PoseStamped
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation
from trajectory_msgs.msg import JointTrajectoryPoint
if __name__=='__main__':
roscpp_initialize(sys.argv)
rospy.init_node('moveit_py_demo', anonymous=True)
GRIPPER_FRAME = 'right_gripper_link'
scene = PlanningSceneInterface()
robot = RobotCommander()
right_arm = MoveGroupCommander("right_arm")
right_gripper = MoveGroupCommander("right_gripper")
#right_arm.set_planner_id("KPIECEkConfigDefault");
rospy.sleep(1)
# clean the scene
scene.remove_world_object("table")
scene.remove_world_object("part")
scene.remove_attached_object(GRIPPER_FRAME, "part")
#rospy.logwarn("cleaning world")
#right_arm.set_named_target("r_start")
#right_arm.go()
#right_gripper.set_named_target("open")
#right_gripper.go()
rospy.sleep(3)
# publish a demo scene
p = PoseStamped()
p.header.frame_id = robot.get_planning_frame()
# add a table
p.pose.position.x = 1.0
p.pose.position.y = 0.2
p.pose.position.z = 0.3
scene.add_box("table", p, (0.7, 1, 0.7))
# add an object to be grasped
p.pose.position.x = 0.4
p.pose.position.y = 0
p.pose.position.z = 0.75
scene.add_box("part", p, (0.07, 0.01, 0.2))
# add a position for placement
p1 = PoseStamped()
p1.header.frame_id = robot.get_planning_frame()
p1.pose.position.x = 0.4
p1.pose.position.y = -0.3
p1.pose.position.z = 0.75
rospy.sleep(1)
#rospy.logwarn("moving to test")
grasps = []
# stray reference values kept as a comment so they are not evaluated: 0.67611; 0.0091003; 0.71731
g = Grasp()
g.id = "test"
grasp_pose = PoseStamped()
grasp_pose.header.frame_id = "base_footprint"
grasp_pose.pose.position.x = 0.35
grasp_pose.pose.position.y = -0
grasp_pose.pose.position.z = 0.76
grasp_pose.pose.orientation.x = -0.0209083116076
grasp_pose.pose.orientation.y = -0.00636455547831
grasp_pose.pose.orientation.z = 0.0170413352124
grasp_pose.pose.orientation.w = 0.999615890147
rospy.logwarn("moving to arm")
right_arm.set_pose_target(grasp_pose)
right_arm.go()
rospy.sleep(1)
# set the grasp pose
g.grasp_pose = grasp_pose
# define the pre-grasp approach
g.pre_grasp_approach.direction.header.frame_id = "base_footprint"
g.pre_grasp_approach.direction.vector.x = 0.4
g.pre_grasp_approach.direction.vector.y = -0.0
g.pre_grasp_approach.direction.vector.z = 1.0
g.pre_grasp_approach.min_distance = 0.001
g.pre_grasp_approach.desired_distance = 0.1
g.pre_grasp_posture.header.frame_id = "right_gripper_link"
g.pre_grasp_posture.joint_names = ["right_arm_gripper_joint"]
pos = JointTrajectoryPoint()
pos.positions.append(0.0)
g.pre_grasp_posture.points.append(pos)
# set the grasp posture
g.grasp_posture.header.frame_id = "right_gripper_link"
g.grasp_posture.joint_names = ["right_arm_gripper_joint"]
pos = JointTrajectoryPoint()
pos.positions.append(0.2)
pos.effort.append(0.0)
g.grasp_posture.points.append(pos)
# set the post-grasp retreat
g.post_grasp_retreat.direction.header.frame_id = "base_footprint"
g.post_grasp_retreat.direction.vector.x = 1
g.post_grasp_retreat.direction.vector.y = -1
g.post_grasp_retreat.direction.vector.z = 1
g.post_grasp_retreat.desired_distance = 0.35
g.post_grasp_retreat.min_distance = 0.01
g.allowed_touch_objects = ["table"]
g.max_contact_force = 0
# append the grasp to the list of grasps
grasps.append(g)
# pick the object
#robot.right_arm.pick("part", grasps)
result = False
n_attempts = 0
# repeat until it succeeds
while result == False:
result = robot.right_arm.pick("part", grasps)
n_attempts += 1
print "Attempts pickup: ", n_attempts
rospy.sleep(0.2)
rospy.sleep(6)
result1 = False
n_attempts1 = 0
while result1 == False:
result1 = robot.right_arm.place("part",p1)
n_attempts1 += 1
print "Attempts place: ", n_attempts1
rospy.sleep(0.2)
#robot.right_arm.place("part",p)
#right_arm.go()
rospy.sleep(0.1)
rospy.spin()
roscpp_shutdown()
|
bsd-3-clause
| -1,146,657,331,692,744,300
| 27.859873
| 87
| 0.651291
| false
| 2.876825
| false
| false
| false
|
plaes/numpy
|
numpy/core/code_generators/genapi.py
|
1
|
15403
|
"""
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
if sys.version_info[:2] < (2, 6):
from sets import Set as set
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'methods.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'number.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'usertypes.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'buffer.c'),
join('multiarray', 'datetime.c'),
join('umath', 'ufunc_object.c'),
join('umath', 'loops.c.src'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
return ''.join(s.split())
def _repl(str):
return str.replace('intp', 'npy_intp').replace('Bool','npy_bool')
class Function(object):
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
self.args = args
self.doc = doc
def _format_arg(self, (typename, name)):
if typename.endswith('*'):
return typename + name
else:
return typename + ' ' + name
def __str__(self):
argstr = ', '.join([self._format_arg(a) for a in self.args])
if self.doc:
doccomment = '/* %s */\n' % self.doc
else:
doccomment = ''
return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
def to_ReST(self):
lines = ['::', '', ' ' + self.return_type]
argstr = ',\000'.join([self._format_arg(a) for a in self.args])
name = ' %s' % (self.name,)
s = textwrap.wrap('(%s)' % (argstr,), width=72,
initial_indent=name,
subsequent_indent=' ' * (len(name)+1),
break_long_words=False)
for l in s:
lines.append(l.replace('\000', ' ').rstrip())
lines.append('')
if self.doc:
lines.append(textwrap.dedent(self.doc))
return '\n'.join(lines)
def api_hash(self):
m = md5new()
m.update(remove_whitespace(self.return_type))
m.update('\000')
m.update(self.name)
m.update('\000')
for typename, name in self.args:
m.update(remove_whitespace(typename))
m.update('\000')
return m.hexdigest()[:8]
class ParseError(Exception):
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
count = 0
for i, c in enumerate(s):
if c == lbrac:
count += 1
elif c == rbrac:
count -= 1
if count == 0:
return i
raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
arguments = []
bracket_counts = {'(': 0, '[': 0}
current_argument = []
state = 0
i = 0
def finish_arg():
if current_argument:
argstr = ''.join(current_argument).strip()
m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
if m:
typename = m.group(1).strip()
name = m.group(3)
else:
typename = argstr
name = ''
arguments.append((typename, name))
del current_argument[:]
while i < len(argstr):
c = argstr[i]
if c == ',':
finish_arg()
elif c == '(':
p = skip_brackets(argstr[i:], '(', ')')
current_argument += argstr[i:i+p]
i += p-1
else:
current_argument += c
i += 1
finish_arg()
return arguments
def find_functions(filename, tag='API'):
"""
Scan the file, looking for tagged functions.
Assuming ``tag=='API'``, a tagged function looks like::
/*API*/
static returntype*
function_name(argtype1 arg1, argtype2 arg2)
{
}
where the return type must be on a separate line, the function
name must start the line, and the opening ``{`` must start the line.
An optional documentation comment in ReST format may follow the tag,
as in::
/*API
This function does foo...
*/
"""
fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
function_args = []
doclist = []
SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
try:
line = line.strip()
if state == SCANNING:
if line.startswith(tagcomment):
if line.endswith('*/'):
state = STATE_RETTYPE
else:
state = STATE_DOC
elif state == STATE_DOC:
if line.startswith('*/'):
state = STATE_RETTYPE
else:
line = line.lstrip(' *')
doclist.append(line)
elif state == STATE_RETTYPE:
# first line of declaration with return type
m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
if m:
line = m.group(1)
return_type = line
state = STATE_NAME
elif state == STATE_NAME:
# second line, with function name
m = re.match(r'(\w+)\s*\(', line)
if m:
function_name = m.group(1)
else:
raise ParseError(filename, lineno+1,
'could not find function name')
function_args.append(line[m.end():])
state = STATE_ARGS
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
fargs_str = ' '.join(function_args).rstrip(' )')
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
functions.append(f)
return_type = None
function_name = None
function_args = []
doclist = []
state = SCANNING
else:
function_args.append(line)
except:
print filename, lineno+1
raise
fo.close()
return functions
def should_rebuild(targets, source_files):
from distutils.dep_util import newer_group
for t in targets:
if not os.path.exists(t):
return True
sources = API_FILES + list(source_files) + [__file__]
if newer_group(sources, targets[0], missing='newer'):
return True
return False
# These *Api class instances know how to output strings for the generated code
class TypeApi:
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject %(type)s;
#else
NPY_NO_EXPORT PyTypeObject %(type)s;
#endif
""" % {'type': self.name}
return astr
class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
self.type = type
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (%s *) &%s" % (self.type, self.name)
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT %(type)s %(name)s;
#else
NPY_NO_EXPORT %(type)s %(name)s;
#endif
""" % {'type': self.type, 'name': self.name}
return astr
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
"""
return astr
class FunctionApi:
def __init__(self, name, index, return_type, args, api_name):
self.name = name
self.index = index
self.return_type = return_type
self.args = args
self.api_name = api_name
def _argtypes_string(self):
if not self.args:
return 'void'
argstr = ', '.join([_repl(a[0]) for a in self.args])
return argstr
def define_from_array_api_string(self):
define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
self.return_type,
self._argtypes_string(),
self.api_name,
self.index)
return define
def array_api_define(self):
return " (void *) %s" % self.name
def internal_define(self):
astr = """\
NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type,
self.name,
self._argtypes_string())
return astr
def order_dict(d):
"""Order dict by its values."""
o = d.items()
def cmp(x, y):
return x[1] - y[1]
return sorted(o, cmp=cmp)
def merge_api_dicts(dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def check_api_dict(d):
"""Check that an api dict is valid (does not use the same index twice)."""
# We check if the same index is used twice: we 'invert' the dict so that indexes
# become keys. If the lengths differ, it means one index has been used
# at least twice
revert_dict = dict([(v, k) for k, v in d.items()])
if not len(revert_dict) == len(d):
# We compute a dict index -> list of associated items
doubled = {}
for name, index in d.items():
try:
doubled[index].append(name)
except KeyError:
doubled[index] = [name]
msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
if len(names) != 1]
raise ValueError(msg)
# No 'holes' in the indexes are allowed, and the indexing must start at 0
indexes = set(d.values())
expected = set(range(len(indexes)))
if not indexes == expected:
diff = expected.symmetric_difference(indexes)
msg = "There are some holes in the API indexing: " \
"(symmetric diff is %s)" % diff
raise ValueError(msg)
def get_api_functions(tagname, api_dict):
"""Parse source files to get functions tagged by the given tag."""
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
dfunctions = []
for func in functions:
o = api_dict[func.name]
dfunctions.append( (o, func) )
dfunctions.sort()
return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
"""Given a list of api dicts defining the numpy C API, compute a checksum
of the list of items in the API (as a string)."""
a = []
for d in api_dicts:
def sorted_by_values(d):
"""Sort a dictionary by its values. Assume the dictionary items is of
the form func_name -> order"""
return sorted(d.items(), key=lambda (x, y): (y, x))
for name, index in sorted_by_values(d):
a.extend(name)
a.extend(str(index))
return md5new(''.join(a)).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum is a 128-bit md5 checksum (hex format as well)
VERRE = re.compile('(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
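# An entry in cversions.txt therefore looks like (illustrative values):
#   0x00000009 = f17681d9b8a568a75e56d4c9cbd3e7f2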
def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
fid = open(file, 'r')
try:
for line in fid.readlines():
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
finally:
fid.close()
return dict(d)
def main():
tagname = sys.argv[1]
order_file = sys.argv[2]
functions = get_api_functions(tagname, order_file)
m = md5new(tagname)
for func in functions:
print func
ah = func.api_hash()
m.update(ah)
print hex(int(ah,16))
print hex(int(m.hexdigest()[:8],16))
if __name__ == '__main__':
main()
|
bsd-3-clause
| 8,382,274,081,688,541,000
| 31.427368
| 81
| 0.510355
| false
| 3.890629
| false
| false
| false
|
skysports-digitalmedia/php-buildpack
|
tests/test_compile_helpers.py
|
1
|
15810
|
import os
import os.path
import tempfile
import shutil
from nose.tools import eq_
from build_pack_utils import utils
from compile_helpers import setup_webdir_if_it_doesnt_exist
from compile_helpers import convert_php_extensions
from compile_helpers import is_web_app
from compile_helpers import find_stand_alone_app_to_run
from compile_helpers import load_binary_index
from compile_helpers import find_all_php_versions
from compile_helpers import find_all_php_extensions
from compile_helpers import validate_php_version
from compile_helpers import validate_php_extensions
from compile_helpers import setup_log_dir
class TestCompileHelpers(object):
def setUp(self):
self.build_dir = tempfile.mkdtemp(prefix='build-')
self.cache_dir = tempfile.mkdtemp(prefix='cache-')
os.rmdir(self.build_dir) # delete otherwise copytree complains
os.rmdir(self.cache_dir) # cache dir does not exist normally
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
for name in os.listdir(os.environ['TMPDIR']):
if name.startswith('httpd-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
if name.startswith('php-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
def assert_exists(self, *args):
eq_(True, os.path.exists(os.path.join(*args)),
"Does not exists: %s" % os.path.join(*args))
def test_setup_log_dir(self):
eq_(False, os.path.exists(os.path.join(self.build_dir, 'logs')))
setup_log_dir({
'BUILD_DIR': self.build_dir
})
self.assert_exists(self.build_dir, 'logs')
def test_setup_if_webdir_exists(self):
shutil.copytree('tests/data/app-1', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_exists(self):
shutil.copytree('tests/data/app-6', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(3, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_htdocs_does_not_exist_but_library_does(self):
shutil.copytree('tests/data/app-7', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, 'htdocs', 'library')
self.assert_exists(self.build_dir, 'htdocs', 'library', 'junk.php')
self.assert_exists(self.build_dir, 'lib')
self.assert_exists(self.build_dir, 'lib', 'test.php')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, 'manifest.yml')
eq_(4, len(os.listdir(self.build_dir)))
eq_(4, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_with_stand_alone_app(self):
shutil.copytree('tests/data/app-5', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEB_SERVER': 'none'
}))
self.assert_exists(self.build_dir, 'app.php')
eq_(1, len(os.listdir(self.build_dir)))
def test_convert_php_extensions_54(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"\n'
'zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"\nzend_extension="zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_none(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_none(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_one(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_one(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_is_web_app(self):
ctx = {}
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'nginx'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'httpd'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'none'
eq_(False, is_web_app(ctx))
def test_find_stand_alone_app_to_run_app_start_cmd(self):
ctx = {'APP_START_CMD': "echo 'Hello World!'"}
eq_("echo 'Hello World!'", find_stand_alone_app_to_run(ctx))
results = ('app.php', 'main.php', 'run.php', 'start.php', 'app.php')
for i, res in enumerate(results):
ctx = {'BUILD_DIR': 'tests/data/standalone/test%d' % (i + 1)}
eq_(res, find_stand_alone_app_to_run(ctx))
def test_load_binary_index(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
assert json is not None
assert 'php' in json.keys()
eq_(9, len(json['php'].keys()))
def test_find_all_php_versions(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
versions = find_all_php_versions(json)
eq_(9, len(versions))
eq_(3, len([v for v in versions if v.startswith('5.4.')]))
eq_(3, len([v for v in versions if v.startswith('5.5.')]))
def test_find_php_extensions(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
exts = find_all_php_extensions(json)
eq_(9, len(exts.keys()))
tmp = exts[[key for key in exts.keys() if key.startswith('5.4')][0]]
assert 'amqp' in tmp
assert 'apc' in tmp
assert 'imap' in tmp
assert 'ldap' in tmp
assert 'phalcon' in tmp
assert 'pspell' in tmp
assert 'pdo_pgsql' in tmp
assert 'mailparse' in tmp
assert 'redis' in tmp
assert 'pgsql' in tmp
assert 'snmp' in tmp
assert 'cgi' not in tmp
assert 'cli' not in tmp
assert 'fpm' not in tmp
assert 'pear' not in tmp
def test_validate_php_version(self):
ctx = {
'ALL_PHP_VERSIONS': ['5.4.31', '5.4.30'],
'PHP_54_LATEST': '5.4.31',
'PHP_VERSION': '5.4.30'
}
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.29'
validate_php_version(ctx)
eq_('5.4.31', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.30'
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
def test_validate_php_extensions(self):
ctx = {
'ALL_PHP_EXTENSIONS': {
'5.4.31': ['curl', 'pgsql', 'snmp', 'phalcon']
},
'PHP_VERSION': '5.4.31',
'PHP_EXTENSIONS': ['curl', 'snmp']
}
validate_php_extensions(ctx)
eq_(2, len(ctx['PHP_EXTENSIONS']))
assert 'curl' in ctx['PHP_EXTENSIONS']
assert 'snmp' in ctx['PHP_EXTENSIONS']
ctx['PHP_EXTENSIONS'] = ['curl', 'pspell', 'imap', 'phalcon']
validate_php_extensions(ctx)
eq_(2, len(ctx['PHP_EXTENSIONS']))
assert 'curl' in ctx['PHP_EXTENSIONS']
assert 'phalcon' in ctx['PHP_EXTENSIONS']
|
apache-2.0
| 2,697,824,671,140,327,000
| 42.315068
| 76
| 0.569892
| false
| 3.308916
| true
| false
| false
|
soarpenguin/python-scripts
|
terminal.py
|
1
|
1467
|
#!/usr/bin/env python
import os
def clrscr():
""" Clear screen and move cursor to 1,1 (upper left) pos. """
print '\033[2J\033[1;1H'
def clreol():
""" Erases from the current cursor position to the end of the current line. """
print '\033[K'
def delline():
""" Erases the entire current line. """
print '\033[2K'
def gotoxy(x, y):
""" Moves the cursor to the specified position. """
print "\033[%d;%dH" % (x, y)
def _ioctl_GWINSZ(fd): #### TABULATION FUNCTIONS
try: ### Discover terminal width
import fcntl
import termios
import struct
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return
return cr
def terminal_size(): ### decide on *some* terminal size
"""Return (lines, columns)."""
cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2) # try open fds
if not cr: # ...then ctty
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = _ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr: # env vars or finally defaults
try:
cr = os.environ['LINES'], os.environ['COLUMNS']
except:
cr = 25, 80
return int(cr[1]), int(cr[0]) # reverse rows, cols
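# Minimal demo sketch (assumes an ANSI-capable terminal; not part of the
# original module):
if __name__ == '__main__':
    cols, lines = terminal_size()
    clrscr()
    gotoxy(1, 1)
    print 'terminal size: %d columns x %d lines' % (cols, lines)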
|
gpl-3.0
| 4,358,864,761,990,705,700
| 30.891304
| 83
| 0.498978
| false
| 3.6675
| false
| false
| false
|
gelbander/retain24wrapper
|
retain24wrapper/retain24wrapper.py
|
1
|
5339
|
# -*- coding: utf-8 -*-
import time
from tempfile import NamedTemporaryFile
from xml.etree import cElementTree as ET
from xml.etree.cElementTree import XML
from dicttoxml import dicttoxml
import requests
import xmltodict
ACTIONS = {}
ACTIONS['GET_PROVIDERS'] = {'TA_ACTION': '5-45103'}
ACTIONS['ISSUE'] = {'TA_ACTION': '5-45102'}
ACTIONS['VALIDATE'] = {'TA_ACTION': '5-43101'}
class Provider(object):
def __init__(self, body):
""" Populate an Provider instance base on body data. """
for k, v in body.iteritems():
self.__setattr__(k.replace('@','').lower(), v)
def __repr__(self):
""" Printable representation. """
return ' - '.join([self.name, self.id])
class Retain24Wrapper(object):
"""
Usage::
>>> from retain24wrapper import Retain24Wrapper
>>> r = Retain24Wrapper(base_url, certificate_path)
>>> providers = r.get_providers()
[H&M - 001, Lindex - 002, ICA - 003]
>>> r.issue_valuable(args)
OrderedDict([(u'MSISDN', u'00467311122233'), ... (u'STATUS', u'OK')])
>>> r.validate_valuable(args)
OrderedDict([(u'CPNINFO'...
"""
def __init__(self, base_url, certificate_path):
""" Setup the retain wrapper object. """
self.base_url = base_url
self.certificate_path = certificate_path
self.providers = []
def parse_response(self, resp):
"""Parse response data into a dictionary."""
return xmltodict.parse(resp.content)['TICKETANYWHERE']['COUPON']['RESPONSE']
def populate_xml(self, body, **kwargs):
""" Prepare the xml data to be sent to the api"""
tmp = NamedTemporaryFile(mode='w+b', suffix='xml', delete=True)
root = ET.Element("TICKETANYWHERE")
coupon = ET.SubElement(root, "COUPON", {'VER': '1.0'})
body_xml = XML(dicttoxml(body, root=False, attr_type=False))
if (kwargs.get('body_attrs')):
body_xml.attrib = kwargs.get('body_attrs')
coupon.append(body_xml)
tmp.write('<?xml version="1.0" encoding="ISO-8859-1" ?>')
ET.ElementTree(root).write(tmp)
tmp.seek(0)
file = tmp.read()
tmp.close()
return file
def validate_receipt(self, resp):
""" Parse the issue and send response and checks for errors."""
receipt = self.parse_response(resp)['RECEIPT']
if (receipt['STATUS'] == 'ERROR'):
raise ValueError('ERRORCODE: {error_code} - {message}'.format(
error_code=receipt['ERRORCODE'],
message=receipt['MESSAGE']
))
return receipt
def get_providers(self):
""" Cet currently available providers.
:return: self.providers: A list with available providers.
"""
resp = requests.get(self.base_url, params=ACTIONS['GET_PROVIDERS'], cert=self.certificate_path, verify=True, stream=True)
for template in self.parse_response(resp)['TEMPLATELIST']['TEMPLATE']:
self.providers.append(Provider(template))
return self.providers
def issue_valuable(self, template_id, qty, msisdn, **kwargs):
""" Generate a coupon (aka valuable).
:param template_id: The retain24 id for a clinet/organization
:param qty: The value of coupon 100 = 1 SEK
:param msisdn: Customer id also customers phone number.
:param: email_address: (optional) Customers email.
:param: sms_text: (optional) SMS text.
:param: email_text: (optional) Email text.
:param: send_date: (optional) Date sent.
:return receipt: Receipt
"""
email_address = kwargs.get('email_address', 'None')
sms_text = kwargs.get('sms_text', 'None')
email_text = kwargs.get('email_text', 'None')
send_date = kwargs.get('send_date', time.strftime('%Y-%m-%d %H:%m'))
obj = {
"SEND": {
"TEMPLATE": template_id,
"QTY": qty,
"MSISDN": msisdn,
"EMAIL_ADDRESS": email_address,
"SMS_TEXT": sms_text,
"EMAIL_TEXT": email_text,
"SEND_DATE": send_date,
}
}
xml = self.populate_xml(obj)
resp = requests.post(
self.base_url,
data=xml,
params=ACTIONS['ISSUE'],
cert=self.certificate_path,
verify=True,
stream=True
)
receipt = self.validate_receipt(resp)
return receipt
def validate_valuable(self, msisdn, pin, multicode):
""" Valudate a valuable aka. coupon.
:param multicode: The unique code for a valuable.
:param pin: Pincode, set to empty string if provider doesnt need it.
:param msisdn: Customer id also customers phone number.
"""
obj = {
"VALIDATE": {
"MSISDN": msisdn,
"PIN": pin,
"MULTICODE": multicode
}
}
xml = self.populate_xml(body=obj, body_attrs={'TYPE': 'STANDARD'})
resp = requests.post(
self.base_url,
data=xml,
params=ACTIONS['VALIDATE'],
cert=self.certificate_path,
verify=True,
stream=True
)
return self.parse_response(resp)
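# Minimal end-to-end sketch (endpoint URL, certificate path and values are
# illustrative; not part of the original module):
if __name__ == '__main__':
    api = Retain24Wrapper('https://example.retain24.test/api', '/path/to/client.pem')
    for provider in api.get_providers():
        print(provider)
    receipt = api.issue_valuable(template_id='001', qty=10000, msisdn='00467311122233')
    print(receipt['STATUS'])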
|
mit
| -3,539,120,547,608,042,500
| 30.222222
| 129
| 0.567335
| false
| 3.832735
| false
| false
| false
|
chinfeng/gumpy
|
huacaya/auth/endpoint.py
|
1
|
13593
|
# -*- coding: utf-8 -*-
__author__ = 'chinfeng'
import os
import uuid
import json
import datetime
import tornado.web
from tornado.web import HTTPError
from tornado.escape import json_decode
try:
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
except ImportError:
from urllib.parse import urlencode, urlsplit, urlunsplit
import logging
logger = logging.getLogger(__name__)
from .auth import AuthorizationError
def json_default(obj):
if isinstance(obj, datetime.datetime):
return str(obj)
else:
return obj
class BaseHandler(tornado.web.RequestHandler):
def initialize(self, **kwds):
self._auth_server = kwds.get('auth_server', None)
self._auth_provider = kwds.get('auth_provider', None)
self._current_user = None
def prepare(self):
if all((
self.request.method.upper() != 'GET',
self.request.headers.get('content-type', '').startswith('application/json'),
)):
self.json_data = json_decode(self.request.body)
else:
self.json_data = None
def get_argument(self, name, default=None, strip=True):
if self.json_data:
arg = self.json_data.get(name, default)
return arg.strip() if strip and isinstance(arg, str) else arg
else:
return tornado.web.RequestHandler.get_argument(self, name, default, strip)
def write_error(self, status_code, **kwds):
try:
self.write(kwds)
except TypeError:
tornado.web.RequestHandler.write_error(self, status_code, **kwds)
def get_current_user(self):
if not self._current_user:
account_raw = self.get_secure_cookie('account', None)
self._current_user = json_decode(account_raw) if account_raw else None
return self._current_user
def get_access_token(self):
access_token = self.get_secure_cookie('access_token', None)
if not access_token:
bearer_str = self.request.headers.get('Authorization', None)
if bearer_str:
if bearer_str.startswith('Bearer '):
return bearer_str[7:]
return self.get_argument('access_token', None)
else:
return access_token.decode('utf-8')
class RedirectBaseHandler(BaseHandler):
def send_redirect(self, redirect_uri, args):
self.clear()
url_parts = list(urlsplit(redirect_uri))
url_parts[3] = '&'.join((urlencode({k: v for k, v in args.items() if v is not None}), url_parts[3])).strip('&')
self.redirect(urlunsplit(url_parts))
def send_invalid_request_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='invalid_request', error_description='The request is missing a required parameter.',
))
def send_unsupported_response_type_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='unsupported_response_type',
error_description='The authorization server does not support obtaining an authorization code using this method.',
))
def send_unauthorized_client_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='unauthorized_client',
error_description='The client is not authorized to request an authorization code using this method.',
))
def send_access_denied_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='access_denied',
error_description='The resource owner or authorization server denied the request.',
))
class MainHandler(BaseHandler):
__route__ = r'/?'
def get(self):
self.redirect('/auth/index.html')
class SignUpHandler(BaseHandler):
__route__ = r'/signup'
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self._auth_server.register_account(data)
token_data = self._auth_provider.password_grant(data['username'], data, 'me, all')
logger.debug('access_token: {0}'.format(token_data['access_token']))
self.set_secure_cookie('access_token', token_data['access_token'])
self.set_secure_cookie('refresh_token', token_data['refresh_token'])
self.write(token_data)
class RevokeTokenHandler(BaseHandler):
""" TODO: demonstration without any permission check for now """
__route__ = r'/revoke'
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
token = data.get('token')
self._auth_server.revoke_token(token)
self.write({})
class AccountListHandler(BaseHandler):
__route__ = r'/accounts'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_accounts())))
else:
self.send_error(403)
class TokenListHandler(BaseHandler):
__route__ = r'/tokens'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_tokens()), default=json_default))
else:
self.send_error(403)
class ClientListHandler(BaseHandler):
__route__ = r'/clients'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_clients())))
else:
self.send_error(403)
class AccountInfoHandler(BaseHandler):
__route__ = r'/me'
def get(self):
token = self.get_access_token()
logger.debug('get_access_token: {0}'.format(token))
if token and self._auth_server.verify_scope(token, 'me'):
account = self._auth_server.get_account_by_token(token)
if account:
account.pop('password', None)
self.write(account)
else:
self.send_error(
500, error='server_error',
error_description='account not found',
)
else:
self.set_header(
'WWW-Authenticate',
'Bearer realm="{0}", error="{1}"'.format(
'example', 'access_denied',
)
)
self.set_status(401, 'Unauthorized')
class SignInHandler(BaseHandler):
__route__ = r'/signin'
def post(self):
account = self._auth_server.find_account(self.json_data)
if account:
del account['password']
self.set_secure_cookie('account', json.dumps(account, default=json_default))
self.write({'sign_in': 'success'})
class AuthorizeHandler(RedirectBaseHandler):
__route__ = r'/authorize'
__sign_in_endpoint__ = r'/signin.html'
__auth_endpoint__ = r'/auth.html'
__default_redirect__ = r'/default_callback'
def get(self):
# https://tools.ietf.org/html/rfc6749#section-4.1.1
# https://tools.ietf.org/html/rfc6749#section-4.2.1
# No default redirect callback mechanism yet, so redirect_uri is a required parameter
redirect_uri = self.get_argument('redirect_uri', None)
response_type = self.get_argument('response_type', None)
client_id = self.get_argument('client_id', None)
scope = self.get_argument('scope', None)
state = self.get_argument('state', None)
if not (redirect_uri and response_type and client_id):
self.send_invalid_request_error(redirect_uri or self.__default_redirect__, state)
elif response_type not in ('code', 'token'):
self.send_unsupported_response_type_error(redirect_uri, state)
elif not self._auth_server.has_client_id(client_id):
self.send_unauthorized_client_error(redirect_uri, state)
else:
self.send_redirect(self.__sign_in_endpoint__, dict(
response_type=response_type, client_id=client_id,
redirect_uri=redirect_uri, state=state, scope=scope,
))
def post(self):
# https://tools.ietf.org/html/rfc6749#section-4.1.1
# https://tools.ietf.org/html/rfc6749#section-4.2.1
# No default redirect callback mechanism yet, so redirect_uri is a required parameter
redirect_uri = self.get_argument('redirect_uri', None)
response_type = self.get_argument('response_type', None)
client_id = self.get_argument('client_id', None)
state = self.get_argument('state', None)
scope = self.get_argument('scope', None)
agreed = self.get_argument('agreed', 0)
account = self.get_current_user()
if not (redirect_uri and response_type and client_id):
self.send_invalid_request_error(redirect_uri or self.__default_redirect__, state)
elif not agreed:
self.send_access_denied_error(redirect_uri, state)
elif response_type == 'code':
# https://tools.ietf.org/html/rfc6749#section-4.1.1
# No default redirect callback mechanism yet, so redirect_uri is a required parameter
if self._auth_server.has_client_id(client_id):
self.send_redirect(redirect_uri, dict(
state=state,
code=self._auth_provider.authorization_request(account['username'], client_id, redirect_uri, scope)
))
else:
self.send_unauthorized_client_error(redirect_uri, state)
elif response_type == 'token':
# https://tools.ietf.org/html/rfc6749#section-4.2.1
# No default redirect callback mechanism yet, so redirect_uri is a required parameter
if self._auth_server.has_client_id(client_id):
access_token_data = self._auth_provider.implicit_grant(account['username'], client_id, redirect_uri, scope)
self.send_redirect(redirect_uri, dict(
state=state, expires_in=access_token_data['expires_in'],
token_type=access_token_data['token_type'], access_token=access_token_data['access_token'],
))
else:
self.send_unauthorized_client_error(redirect_uri, state)
else:
self.send_unsupported_response_type_error(redirect_uri, state)
class GrantHandler(BaseHandler):
__route__ = r'/grant'
def post(self):
grant_type = self.get_argument('grant_type', None)
if grant_type == 'authorization_code':
authorization_code = self.get_argument('code', None)
client_id = self.get_argument('client_id', None)
redirect_uri = self.get_argument('redirect_uri', None)
try:
self.write(
self._auth_provider.authorization_code_grant(
authorization_code, client_id, redirect_uri
)
)
except BaseException as err:
self.send_error(400, **err.args[0])
elif grant_type == 'refresh_token':
# Refreshing an Access Token
# https://tools.ietf.org/html/rfc6749#section-6
try:
self.write(
self._auth_provider.refresh_token_grant(self.get_argument('refresh_token', None))
)
except BaseException as err:
self.send_error(400, **err.args[0])
elif grant_type == 'password':
username = self.get_argument('username', None)
password = self.get_argument('password', None)
scope = self.get_argument('scope', None)
try:
token_data = self._auth_server.password_grant(
username, {'username': username, 'password': password}, scope)
self.write(token_data)
except AuthorizationError:
self.send_error(400, error='invalid_request')
elif grant_type:
self.send_error(
400, error='unsupported_grant_type',
error_description='The authorization grant type is not supported by the authorization server.',
)
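# Rough sketch of the token requests this handler expects (parameter names
# follow RFC 6749; the concrete values below are invented):
#   POST /grant  grant_type=authorization_code&code=SplxlOBe&client_id=abc&redirect_uri=https://client.example/cb
#   POST /grant  grant_type=refresh_token&refresh_token=tGzv3JOk
#   POST /grant  grant_type=password&username=alice&password=secret&scope=me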
class EndpointApplication(tornado.web.Application):
def __init__(self, auth_server, auth_provider):
self._auth_server = auth_server
self._auth_provider = auth_provider
super(self.__class__, self).__init__(
self.get_handlers(auth_server=auth_server, auth_provider=auth_provider),
cookie_secret=uuid.uuid4().hex
)
def get_handlers(self, **kwds):
handlers = [
MainHandler, SignUpHandler, AuthorizeHandler, GrantHandler, AccountInfoHandler,
RevokeTokenHandler, AccountListHandler, TokenListHandler, ClientListHandler, SignInHandler,
]
for handler in handlers:
yield (handler.__route__, handler, kwds)
static_path = os.path.join(os.path.dirname(__file__), 'static')
yield (r'/(.*)', tornado.web.StaticFileHandler, dict(path=static_path))
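# Minimal launch sketch, assuming concrete auth_server / auth_provider objects
# are built elsewhere in this project (the names below are placeholders):
#
#   app = EndpointApplication(my_auth_server, my_auth_provider)
#   app.listen(8080)
#   tornado.ioloop.IOLoop.current().start()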
|
lgpl-3.0
| -4,256,504,983,164,449,000
| 39.875758
| 127
| 0.602046
| false
| 3.8862
| false
| false
| false
|
provideyourown/SiteMonitoring
|
memoryusage.py
|
1
|
1362
|
#!/usr/bin/env python
"""
Display the system memory usage. Can be called on a remote server or use 'local' or 'localhost' for your computer
Usage:
./memoryusage.py MYSERVER
"""
import argparse
import subprocess
def getMemoryUsage(server):
"""
Returns memory usage as a tuple: (used MB, total MB, fraction of swap used)
"""
if server in ['local', 'localhost']:
result = subprocess.check_output('free -m', shell=True)
else:
result = subprocess.check_output('ssh %s "free -m"' % server, shell=True)
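# The parsing below assumes the pre-3.3.10 procps layout of `free -m`,
# roughly (numbers invented):
#              total   used   free  shared  buffers  cached
#   Mem:        3950   3622    328     100      120    1600
#   -/+ buffers/cache:  1902   2048
#   Swap:       1021     42    979
# lines[2] then holds used/free after buffers/cache, lines[3] holds swap.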
lines = result.split('\n')
toks = lines[2].split() # split along whitespace
used = int(toks[2])
free = int(toks[3])
total = used + free
toks = lines[3].split()
swap = float(toks[2]) / float(toks[1]) if int(toks[1]) else 0
return used, total, swap
if __name__ == '__main__': # allow funcs above to be imported as a module
parser = argparse.ArgumentParser(description='Get memory usage for a server/computer.')
parser.add_argument("server", help='Enter server name as defined in ~/.ssh/config or user@ip. NB: public key should be uploaded to server. For local computer use either local or localhost')
args = parser.parse_args()
used, total, swap = getMemoryUsage(args.server)
print "Memory usage: {:.2f}% of {}Mb (swap: {:.2f}%)".format(100.0*used/total, total, swap*100)
exit()
|
gpl-3.0
| 2,848,168,859,462,388,700
| 31.428571
| 193
| 0.654185
| false
| 3.546875
| false
| false
| false
|
Naeka/vosae-app
|
www/invoicing/models/payment.py
|
1
|
3144
|
# -*- coding:Utf-8 -*-
from mongoengine import Document, fields
from django.utils.timezone import now
import decimal
from core.fields import DateField
from invoicing import PAYMENT_TYPES, currency_format
from invoicing.exceptions import (
InvalidPaymentAmount,
)
__all__ = (
'Payment',
'InvoicePayment',
'DownPaymentInvoicePayment',
)
class Payment(Document):
"""
A payment, representing money flows within the company.
Amount can be negative (debit) or positive (credit).
"""
TYPES = PAYMENT_TYPES
tenant = fields.ReferenceField("Tenant", required=True)
issuer = fields.ReferenceField("VosaeUser", required=True)
issued_at = fields.DateTimeField(required=True, default=now)
amount = fields.DecimalField(required=True)
currency = fields.ReferenceField("Currency", required=True)
type = fields.StringField(required=True, choices=TYPES, default="CHECK")
date = DateField(required=True)
note = fields.StringField(max_length=512)
meta = {
"allow_inheritance": True
}
def __unicode__(self):
if self.date and self.amount and self.currency:
return u'%s: %s' % (self.date, currency_format(self.amount, self.currency.symbol, True))
return '%s object' % self.__class__.__name__
@classmethod
def pre_save(self, sender, document, **kwargs):
"""
Pre save hook handler
Validates payment amount
"""
# If amount set from float (not from string), the rounding is only done on init or on save
# So, we round here to prevent incorrect comparison
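# Illustration (values invented): Decimal(10.005) built from a float is
# really ~10.00499999..., so it quantizes to Decimal('10.00'), while
# Decimal('10.005').quantize(Decimal('.00'), decimal.ROUND_HALF_UP)
# gives Decimal('10.01'); rounding here keeps the comparison below consistent.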
document.amount = document.amount.quantize(decimal.Decimal('.00'), decimal.ROUND_HALF_UP)
if document.amount < 0 or document.amount > document.related_to.balance:
raise InvalidPaymentAmount()
@classmethod
def post_save(self, sender, document, created, **kwargs):
"""
Post save hook handler
- Associates payment to related document
- Creates a payment statistic entry
"""
from vosae_statistics.models import PaymentStatistics
if created:
document.related_to.payments.append(document)
document.related_to.save()
# XXX: Should save organization/contact/address
payment_statistic = PaymentStatistics(
tenant=document.tenant,
date=document.date,
amount=document.amount,
payment=document
).save()
@classmethod
def post_delete(self, sender, document, **kwargs):
"""
Post delete hook handler
Removes payment from related document
"""
document.related_to.payments.remove(document)
document.related_to.save()
class InvoicePayment(Payment):
"""Payment related to an :class:`~invoicing.models.Invoice`"""
related_to = fields.ReferenceField("Invoice", required=True, dbref=False)
class DownPaymentInvoicePayment(Payment):
"""Payment related to an :class:`~invoicing.models.DownPaymentInvoice`"""
related_to = fields.ReferenceField("DownPaymentInvoice", required=True, dbref=False)
|
agpl-3.0
| -8,912,325,496,183,845,000
| 29.823529
| 100
| 0.66285
| false
| 4.295082
| false
| false
| false
|
BTCfork/hardfork_prototype_1_mvf-core
|
qa/rpc-tests/mvf-core-csig.py
|
1
|
9119
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2016 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# MVF-Core
"""
Exercise the signature change (replay protection) code.
Derived from walletbackupauto.py.
Test case is:
4 nodes - 2 forking and 2 non-forking, sending transactions between each other.
Prior to the fork, anything goes.
Post fork, the nodes of the same kind can still send between each other,
but not to the nodes of the other kind (2 way check).
"""
import os
import fnmatch
import hashlib
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
import time
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
FORKHEIGHT = 120
class ReplayProtectionTest(BitcoinTestFramework):
def setup_chain(self):
#logging.info("Initializing test directory "+self.options.tmpdir)
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
#logging.info("Starting nodes")
print("Starting nodes")
# all nodes are spenders, let's give them a keypool=100
self.extra_args = [
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT]]
self.nodes = start_nodes(4, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[3], 2)
self.is_network_split=False
self.sync_all()
def send_and_check(self, from_node, to_node, expect_to_succeed=True, force_sync=True, check=True, check_for_fail=False):
''' try sending 0.1 BTC from one node to another,
and optionally check if successful '''
to_addr = self.nodes[to_node].getnewaddress()
amount = Decimal(1) / Decimal(10)
txid = self.nodes[from_node].sendtoaddress(to_addr, amount)
if force_sync:
sync_mempools([self.nodes[from_node], self.nodes[to_node]])
else:
time.sleep(1)
if check:
if check_for_fail:
assert_equal(txid in self.nodes[from_node].getrawmempool(), True)
assert_equal(txid in self.nodes[to_node].getrawmempool(), False)
else:
assert_equal(txid in self.nodes[from_node].getrawmempool() and (txid in self.nodes[to_node].getrawmempool() or not expect_to_succeed), True)
return txid
def run_test(self):
#logging.info("Fork height configured for block %s"%(FORKHEIGHT))
print("Fork height configured for block %s"%(FORKHEIGHT))
#logging.info("Generating initial 104 blocks")
print("Generating initial 104 blocks")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(101)
sync_blocks(self.nodes)
#logging.info("Current height %s blocks"%(self.nodes[0].getblockcount()))
print("Current height %s blocks"%(self.nodes[0].getblockcount()))
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 50)
assert_equal(self.nodes[0].getblockcount(), 104)
#logging.info("Check all sending works after setup")
print("Check all sending works after setup")
# from any node to the others should be ok now
# this should generate 4*3 = 12 more blocks
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
self.send_and_check(src_node, dst_node, True)
self.nodes[dst_node].generate(1)
sync_blocks(self.nodes)
current_height = self.nodes[0].getblockcount()
assert_equal(current_height, 116)
# generate blocks, one on each node in turn, until we reach pre-fork block height
blocks_to_fork = FORKHEIGHT - current_height - 1
self.nodes[0].generate(blocks_to_fork)
# not sure why this loop didn't work reliably...
# maybe it was the round-robin generation
while False: #blocks_to_fork > 0:
#logging.info("blocks left to fork height: %d" % blocks_to_fork)
print("blocks left to fork height: %d" % blocks_to_fork)
self.nodes[blocks_to_fork % 4].generate(1)
blocks_to_fork -= 1
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT - 1)
#logging.info("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
print("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
# check that we can still send to all other nodes for the pre-fork block
# collect a bunch of tx's sent by the nodes to each other
#logging.info("sending tx's between all nodes at pre-fork")
print("sending tx's between all nodes at pre-fork")
should_be_fine_txs = []
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
should_be_fine_txs.append(self.send_and_check(src_node, dst_node, True))
#logging.info("Verifying tx's were still accepted by all nodes")
print("Verifying tx's were still accepted by all nodes")
sync_mempools(self.nodes)
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], True)
# generate the fork block
#logging.info("Generate fork block at height %s" % FORKHEIGHT)
print("Generate fork block at height %s" % FORKHEIGHT)
self.nodes[0].generate(1)
# check the previous round of tx's not in mempool anymore
self.sync_all()
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT)
#logging.info("Verifying tx's no longer in any mempool")
print("Verifying tx's no longer in any mempool")
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], False)
# check that now, only nodes of the same kind can transact
# these pairs should work fine
#logging.info("Checking transactions between same-kind nodes")
print("Checking transactions between same-kind nodes")
for pair in ((0,1), (1,0), (2,3), (3,2)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], True)
# re-connect the nodes which have been disconnected due to the
# above post-fork transactions, so we can test them separately
#logging.info("Re-connecting nodes which disconnected due to prior step")
print("Re-connecting nodes which disconnected due to prior step")
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,1,3)
#logging.info("Checking transactions between forked/unforked nodes")
print("Checking transactions between forked/unforked nodes")
# these should not work anymore
# MVF-Core TODO: decide whether to accept old-style signatures post-fork (maybe limited-time only?)
# if you only want to deny new->old, then use the commented out code
#for pair in ((2,0), (2,1), (3,0), (3,1)):
# check both forked->unforked and vice versa are blocked now
for pair in ((0,2), (0,3), (1,2), (1,3), (2,0), (2,1), (3,0), (3,1)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], expect_to_succeed=False, force_sync=False, check=True, check_for_fail=True)
if __name__ == '__main__':
ReplayProtectionTest().main()
|
mit
| -2,729,698,025,060,871,000
| 43.26699
| 156
| 0.617173
| false
| 3.665193
| true
| false
| false
|
DBeath/flask-feedrsub
|
feedrsub/utils/feeds/feed_generation.py
|
1
|
1207
|
from flask import current_app as app
from typing import List, Dict
from flask import url_for
def websub_discovery_link() -> Dict:
"""
Creates a WebSub discovery link
:return: link as dict
"""
hub_enabled = app.config.get("HUB_ENABLED", False)
if hub_enabled:
hub_url = url_for(
"websub.hub_endpoint",
_external=True,
_scheme=app.config.get("HTTP_SCHEME", "http"),
)
return dict(href=hub_url, rel="hub")
return dict()
def links(links: List[Dict] = None) -> List:
"""
Creates a list of links to add to the feed
:param links: List of Dicts with href and rel keys
:return: list of links as dicts
"""
if not links:
links = []
links_list = []
websub_link = websub_discovery_link()
if websub_link:
links.append(websub_link)
for item in links:
if "href" in item and "rel" in item:
links_list.append(item)
return links_list
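# Example (hypothetical URLs): with HUB_ENABLED set, calling
#   links([{'href': 'https://example.org/feed.atom', 'rel': 'self'}])
# returns the given link plus the discovery link, e.g.
#   [{'href': 'https://example.org/feed.atom', 'rel': 'self'},
#    {'href': 'https://example.org/websub/hub', 'rel': 'hub'}]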
def generator():
"""
Returns the generator of the feed
:return: tuple of generator name, location, version
"""
return (app.config["PROJECT_NAME"], app.config.get("SERVER_NAME", None), "1.0")
|
mit
| -544,283,231,300,035,600
| 22.211538
| 83
| 0.59652
| false
| 3.668693
| false
| false
| false
|
JonathanFrederick/job-hunt
|
company_scripts.py
|
1
|
1197
|
from selenium import webdriver
from companies.red_hat import red_hat
from app import db
from models import Company
def print_result(info):
"""Takes in a dictionary with keys for 'company', 'title', 'url',
and 'description' and prints them neatly to the terminal"""
for key in ['company', 'title', 'url', 'description']:
assert key in info.keys(), \
"The key '{}' is not in the dictionary".format(key)
assert isinstance(info[key], str), \
"The value at '{}' is not a string".format(key)
print('{} - {}'.format(info['company'], info['title']))
print(info['url'])
print(info['description'])
def main():
driver = webdriver.Firefox()
company_dict = {
"Red Hat": red_hat,
}
interesting_companies = db.session.query(Company) \
.filter(Company.interest == True)
for comp in interesting_companies:
company_dict[comp.name](driver)
driver.close()
# print_result({'company': 'comp',
# 'title': 'title',
# 'url': 'url.com',
# 'description': 'things and stuff'})
if __name__ == "__main__":
main()
|
mit
| -6,624,179,340,990,161,000
| 28.195122
| 71
| 0.56391
| false
| 4.003344
| false
| false
| false
|
alexismirandan/Edit-image-kivy-app
|
layout/edit_image_layout.py
|
1
|
2428
|
# -*- coding: utf-8 -*-
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ListProperty, ObjectProperty
from components.touch_selector import TouchSelector
from components.bubble_buttons import BubbleButtons
from layout.image_layout import ImageLayout
from kivy.uix.button import Button
class EditImageLayout(FloatLayout):
color_button = ListProperty([1, .3, .4, 1])
button_color = ListProperty([0, 0, 0, 1])
rectangle_selector = ObjectProperty()
text_size_rectangle = ObjectProperty()
image_layout = ObjectProperty()
bubble_buttons = ObjectProperty()
bubble_buttons_undo_confirm = ObjectProperty()
def __init__(self, **kwargs):
self.sm = kwargs.pop('sm', None)
self.crop_image_screen = kwargs.pop('crop_image_screen', None)
super(EditImageLayout, self).__init__(**kwargs)
self.rectangle_selector.bind(size_selected=self.on_change_size_rectangle_selector)
self.rectangle_selector.bind(size_selected_temp=self.update_text_size_rectangle)
self.bind(on_touch_down=self.bubble_buttons.hide)
self.bubble_buttons.resize_button.bind(on_press=self.on_press_resize_button)
self.bubble_buttons_undo_confirm.undo_button.bind(on_press=self.on_press_undo_button)
self.bubble_buttons_undo_confirm.confirm_button.bind(on_press=self.on_press_confirm_button)
def on_change_size_rectangle_selector(self, instance, size_selected):
if not self.rectangle_selector.tap_not_draw_a_line():
self.bubble_buttons.show()
else:
self.text_size_rectangle.text = ''
def on_press_resize_button(self, instance):
self.image_layout.resize_image(width=self.rectangle_selector.size_selected[0],
height=self.rectangle_selector.size_selected[1])
self.rectangle_selector.delete_line()
self.text_size_rectangle.text = ''
self.bubble_buttons_undo_confirm.show()
def on_press_undo_button(self, instance):
size = self.image_layout.old_size
self.image_layout.resize_image(width=size[0], height=size[1])
self.bubble_buttons_undo_confirm.hide()
def on_press_confirm_button(self, instance):
self.bubble_buttons_undo_confirm.hide()
def update_text_size_rectangle(self, instance, size):
self.text_size_rectangle.text = str('({0}, {1})'.format(int(size[0]), int(size[1])))
|
mit
| 7,713,281,658,232,743,000
| 43.145455
| 99
| 0.689044
| false
| 3.570588
| false
| false
| false
|
botswana-harvard/bcpp-export
|
bcpp_export/old_export/constants.py
|
1
|
1213
|
import numpy as np
from edc_constants.constants import (
ALIVE as edc_ALIVE, DEAD as edc_DEAD, YES as edc_YES, NO as edc_NO,
POS as edc_POS, NEG as edc_NEG, IND as edc_IND, UNK as edc_UNK,
NOT_APPLICABLE as edc_NOT_APPLICABLE,
MALE as edc_MALE, FEMALE as edc_FEMALE)
SUBJECT_IDENTIFIER = 'subject_identifier'
HOUSEHOLD_MEMBER = 'household_member'
edc_DWTA = 'DWTA'
edc_NOT_SURE = 'Not Sure'
edc_ART_PRESCRIPTION = 'ART Prescription'
ALIVE = 1
DEAD = 0
DEFAULTER = 2
DWTA = 4
FEMALE = 2
IND = 2
MALE = 1
NAIVE = 1
NEG = 0
NO = 0
NOT_APPLICABLE = 3
NOT_SURE = 5
ON_ART = 3
PLOT_IDENTIFIER = 'plot_identifier'
POS = 1
UNK = 3
YES = 1
gender = {
edc_MALE: MALE,
edc_FEMALE: FEMALE}
hiv_options = {
edc_POS: POS,
edc_NEG: NEG,
edc_IND: IND,
edc_UNK: UNK,
'not_answering': DWTA,
'positive': POS,
'negative': NEG,
'not_sure': UNK,
None: np.nan}
tf = {
True: YES,
False: NO,
None: np.nan}
yes_no = {
edc_YES: YES,
edc_NO: NO,
'1': YES,
'0': NO,
edc_NOT_APPLICABLE: NOT_APPLICABLE,
None: np.nan,
edc_DWTA: DWTA,
edc_NOT_SURE: NOT_SURE}
survival = {
edc_ALIVE: ALIVE,
edc_DEAD: DEAD,
None: np.nan}
|
gpl-2.0
| -9,128,420,541,188,606,000
| 17.104478
| 71
| 0.611707
| false
| 2.284369
| false
| true
| false
|
NLeSC/PattyAnalytics
|
tests/test_utils.py
|
1
|
2201
|
import os
from tempfile import NamedTemporaryFile
import pcl
import numpy as np
from patty import utils
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_equal, assert_raises
def _compare( pcA, pcB ):
''' compare two pointclouds point-by-point'''
pcA_arr = np.asarray(pcA)
pcB_arr = np.asarray(pcB)
# don't use the set_srs function here, it will be tested later
if hasattr(pcA, 'offset' ):
pcA_arr += pcA.offset
if hasattr(pcB, 'offset' ):
pcB_arr += pcB.offset
assert_array_almost_equal(pcA_arr, pcB_arr, 2,
"Written/read point clouds are different!")
def test_read_write():
''' Test read and write LAS files functionality'''
filename = './testIO.las'
# make and save a pointcloud
pc1 = pcl.PointCloud(10)
pc1_arr = np.asarray(pc1)
pc1_arr[:] = np.random.randn(*pc1_arr.shape)
utils.save(pc1, filename)
# reload it
pc2 = utils.load(filename)
_compare( pc1, pc2 )
os.remove(filename)
def test_auto_file_format():
"""Test saving and loading pointclouds via the pcl loader"""
# make and save a pointcloud
pc = pcl.PointCloud(10)
pc_arr = np.asarray(pc)
pc_arr[:] = np.random.randn(*pc_arr.shape)
with NamedTemporaryFile(suffix='.ply') as f:
utils.save(pc, f.name)
pc2 = utils.load(f.name)
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.pcd') as f:
utils.save(pc, f.name)
pc2 = utils.load(f.name)
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.las') as f:
utils.save(pc, f.name, format="PLY")
pc2 = utils.load(f.name, format="PLY")
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.las') as f:
utils.save(pc, f.name, format="PCD")
pc2 = utils.load(f.name, format="PCD")
_compare( pc, pc2 )
def test_downsample_random():
pc = pcl.PointCloud(10)
a = np.asarray(pc)
a[:] = np.random.randn(*a.shape)
assert_raises(ValueError, utils.downsample_random, pc, 0)
assert_raises(ValueError, utils.downsample_random, pc, 2)
assert_equal(len(utils.downsample_random(pc, .39)), 4)
|
apache-2.0
| -6,275,963,288,678,196,000
| 25.518072
| 73
| 0.625625
| false
| 3.232012
| true
| false
| false
|
Spotipo/spotipo
|
tests/core/test_guestutils.py
|
1
|
10985
|
import sys
import pytest
from flask import current_app,url_for
from flask_wtf import Form
from wtforms import TextField
from faker import Faker
import arrow
import uuid
from unifispot.core.models import Wifisite,Device,Guesttrack,Guest,Loginauth,\
Guestsession
from unifispot.core.guestutils import init_track,validate_track,redirect_guest,\
assign_guest_entry,validate_loginauth_usage
from tests.helpers import randomMAC,get_guestauth_url
fake = Faker()
def test_init_track(session):
#
site1 = Wifisite.query.get(1)
apmac = randomMAC()
mac = randomMAC()
#test creating a new track
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 1 == count,'Guesttrack count is :%s instead of expected 1 '%count
#another track for same MAC done immediately shouldn't create track
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 1 == count,'Guesttrack count is :%s instead of expected 1 '%count
assert isinstance(track,Guesttrack),'init_track is not returning Guestrack instance'
#different MAC
track = init_track(site1,guestmac=randomMAC(),apmac=apmac)
count = Guesttrack.query.count()
assert 2 == count,'Guesttrack count is :%s instead of expected 2 '%count
#same MAC after track expiry
track = Guesttrack.query.get(1)
track.timestamp = arrow.utcnow().replace(seconds= -(current_app.config['GUESTTRACK_LIFETIME'] + 100)).naive
session.commit()
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 3 == count,'Guesttrack count is :%s instead of expected 3 '%count
#check device count
dcount = Device.query.count()
assert 2 == dcount,'Device count is :%s instead of expected 2 '%count
def test_validate_track(session,client,register_testvalidateview):
# needs a fixture defined in conftest as it's a decorator
trackid = str(uuid.uuid4())
mac = randomMAC()
#invalid track ID
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
trackid'%status
#valid track but non-valid site
guesttrack = Guesttrack(trackid=trackid,devicemac=mac)
session.add(guesttrack)
session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
site'%status
#valid site but no device
site1 = Wifisite.query.get(1)
guesttrack.siteid = site1.id
session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
device'%status
device = Device(devicemac=mac,siteid=site1.id)
session.add(device)
session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '200 OK' == status,'Status is :%s instead of 200 OK for valid \
track'%status
def test_redirect_guest(client,session):
site1 = Wifisite.query.get(1)
track = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
#nologin methods
with current_app.test_request_context():
resp = redirect_guest(site1,track)
url = get_guestauth_url(site1,track.trackid)
assert url == resp.location, 'Guest in no auth site is getting redirected to :%s instead of :%s'%\
(resp.location,url)
def test_assign_guest_entry(client,session):
#create dummy email and phone forms
class DummyForm1(Form):
email = TextField('Email')
firstname = TextField('Firstname')
extra1 = TextField('Extra1')
extra2 = TextField('Extra2')
class DummyForm2(Form):
phonenumber = TextField('Email')
firstname = TextField('Firstname')
extra1 = TextField('Extra1')
class DummyFBProfile():
first_name = None
last_name = None
email = None
gender = None
birthday = None
age_range = None
eform = DummyForm1()
eform.email.data = 'test@gmail.com'
eform.firstname.data = 'firstname'
eform.extra1.data = 'extra1'
eform.extra2.data = 'extra2'
pform = DummyForm2()
pform.phonenumber.data = '+1234567890'
pform.firstname.data = 'firstname'
pform.extra1.data = 'extra1'
profile = {
'first_name': 'first_name',
'last_name':'last_name',
'email': 'test23@gmail.com',
'age_range': { 'min': 21, 'max':28} }
site1 = Wifisite.query.get(1)
#test creating a new track
##-----test email form
track1 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track2 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest1 = assign_guest_entry(site1,track1,form=eform)
guest1 = assign_guest_entry(site1,track2,form=eform)
cnt = Guest.query.count()
assert 1 == cnt, 'number of guest created is not 1 but :%s '%cnt
newguest = Guest.query.get(1)
assert newguest.details == {'Extra1':'extra1','Extra2':'extra2'}, 'Guest details is :%s instead \
of expected :%s'%(newguest.details,{'Extra1':'extra1','Extra2':'extra2'})
assert newguest.siteid == site1.id, "Guest siteid is not correctly populated"
assert 1 == Guesttrack.query.get(1).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(2).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
##-----test phone form
track3 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track4 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest2 = assign_guest_entry(site1,track3,form=pform)
guest2 = assign_guest_entry(site1,track4,form=pform)
cnt = Guest.query.count()
assert 2 == cnt, 'number of guest created is not 2 but :%s '%cnt
assert 1 == Guesttrack.query.get(3).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(4).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
##-----test FB profile
track5 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track6 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest1 = assign_guest_entry(site1,track5,fbprofile=profile)
guest1 = assign_guest_entry(site1,track6,fbprofile=profile)
cnt = Guest.query.count()
assert 3 == cnt, 'number of guest created is not 3 but :%s '%cnt
newguest = Guest.query.get(3)
assert 'test23@gmail.com' == newguest.email,'Wrong email '
assert '21-28' == newguest.agerange, 'Wrong age range'
assert 1 == Guesttrack.query.get(5).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(6).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
def test_validate_loginauth_usage(client,session):
site1 = Wifisite.query.get(1)
apmac = randomMAC()
mac = randomMAC()
#test creating a new track
track = init_track(site1,guestmac=mac,apmac=apmac)
loginauth = Loginauth(siteid=site1.id,deviceid=track.deviceid)
loginauth.save()
#timenow for refference
utcnow = arrow.utcnow()
#create bunch of sessions
for i in range(10):
# with unused sessions
days = -(i+1)
session = Guestsession(siteid=site1.id,deviceid=track.deviceid,
loginauthid=loginauth.id)
session.starttime = utcnow.replace(days=days).naive
session.data_used = 50
session.duration = 20
session.save()
#fake login config
class Loginconfig:
def __init__(self,time_limit,data_limit):
self.time_limit = time_limit
self.data_limit = data_limit
#expired data
lconf = Loginconfig(100,50)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Expired datalimit not returning false'
#expired time
lconf = Loginconfig(20,500)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Expired timelimit not returning false'
#nonexpired
lconf = Loginconfig(200,500)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 400,'datlimit is :%s instead of expected 400'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 160,'time_limit is :%s instead of expected 160'%\
chkauth.time_limit
#unlimited data and limited time not expired
lconf = Loginconfig(50,0)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 1000,'datlimit is :%s instead of expected 1000'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 10,'time_limit is :%s instead of expected 10'%\
chkauth.time_limit
#unlimited data and limited time expired
lconf = Loginconfig(30,0)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
#unlimited time and limited data not expired
lconf = Loginconfig(0,300)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 200,'datlimit is :%s instead of expected 200'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 480,'time_limit is :%s instead of expected 480'%\
chkauth.time_limit
#unlimited time and limited data expired
lconf = Loginconfig(0,30)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
|
agpl-3.0
| -1,459,336,541,446,399,200
| 39.241758
| 146
| 0.640419
| false
| 3.644658
| true
| false
| false
|
giovannipro/map-the-glam
|
scraper/scrape/scrape_image_size-analogic.py
|
1
|
9603
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Get data from file page
import os # get file path
import webbrowser # open webpages
import time # get unix code
import datetime # convert in unix timestamp
import urllib, json, io # read json
from urllib import urlopen # open file
import sys # reset file encoding
import datetime # print time
import csv # read csv
import re # replace all occurrences
import pprint # pretty print
from bs4 import BeautifulSoup # parse html
from multiprocessing import Pool
# from multiprocessing import Process
reload(sys)
sys.setdefaultencoding("utf-8")
# -----------------------------------
# Utilities
folder = os.path.dirname(os.path.realpath(__file__))
t = "\t"
n = "\n"
s = " "
commons_page = "https://commons.wikimedia.org/wiki/"
def time():
my_format = "%d %m %Y %I:%M%p"
ts = datetime.datetime.utcnow().strftime(my_format)
print(ts)
def clean_url_a(title):
replace_01 = "?"
replace_02 = "&"
replace_03 = "ä"
replace_04 = "ö"
replace_06 = "("
replace_07 = ")"
replace_08 = ","
replace_10 = "…"
replace_11 = " "
replace_12 = "å"
replace_13 = "ü"
replace_14 = ","
replace_15 = "á"
replace_16 = '"'
replace_17 = '?'
# replace_09 = "-"
clean = title \
.replace(replace_01,"%3f") \
.replace(replace_02,"%26") \
.replace(replace_03,"%e4") \
.replace(replace_04,"%f6") \
.replace(replace_06,"%28") \
.replace(replace_07,"%29") \
.replace(replace_08,"%2c") \
.replace(replace_10,"%20") \
.replace(replace_11,"_") \
.replace(replace_12,"%e5") \
.replace(replace_13,"%fc") \
.replace(replace_14,"%2c") \
.replace(replace_15,"%e1") \
.replace(replace_16,"%22") \
.replace(replace_17,"%3f")
# .replace(replace_05,"%fc")
# .replace(replace_09,"%2d") \
return clean
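# For instance, clean_url_a("Görlitz (old town), 1900.jpg") yields
# "G%f6rlitz_%28old_town%29%2c_1900.jpg" under the substitutions above.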
def clean_url_b(title):
replace_01 = "å"
replace_02 = "é"
replace_03 = "ô"
replace_04 = "è"
replace_05 = "_"
replace_06 = " "
replace_07 = '?'
replace_08 = '&'
clean = title \
.replace(replace_01,"ä") \
.replace(replace_02,"%e9") \
.replace(replace_03,"%f4") \
.replace(replace_04,"%e8") \
.replace(replace_05,"_") \
.replace(replace_06,"_") \
.replace(replace_07,"%3f") \
.replace(replace_07,"%26")
return clean
# -----------------------------------
# Script
def get_img_size_analogic(f_name,start_id):
# start = time.time()
# print(start)
func = "img_size_analogic"
index = 0
f_in = folder + "/data/" + f_name + ".tsv"
f_out = folder + "/data/" + f_name + "_" + func + "-output.tsv"
f_err = folder + "/data/" + f_name + "_" + func + "-errors.tsv"
with open(f_in, "r") as f1:
with open(f_out, "a") as f2:
with open(f_err, "a") as f3:
tsv_file = csv.reader(f1, delimiter="\t")
for file in tsv_file:
index += 1
file_id = file[0]
file_name = file[1]
# print(file_name)
if (index >= start_id):
try:
url = commons_page + file_name
html = urlopen(url)
bsObj = BeautifulSoup(html,"html.parser")
print(file_id)
with open(f_out, "a") as f:
try:
raw_data = bsObj.find("div",{"class":"commons-file-information-table"})
output = str(file_id) + t + file_name + t
f2.write(output)
f2.write("-" + t)
# try:
# value_1 = raw_data.findAll("tr")[1].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_1)
# f2.write(value_1 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.1"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
# pass
f2.write("-" + t)
# try:
# value_2 = raw_data.findAll("tr")[2].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_2)
# f3.write(value_2 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.2"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
# pass
try:
value_3 = raw_data.findAll("tr")[3].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_3)
f2.write(value_3 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.3"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
try:
value_4 = raw_data.findAll("tr")[4].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_4)
f2.write(value_4 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.4"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
pass
try:
value_5 = raw_data.findAll("tr")[5].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_5)
f2.write(value_5 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.5"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
pass
f2.write("-" + t)
# try:
# value_6 = raw_data.findAll("tr")[6].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_6)
# f2.write(value_6 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.6"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
f2.write(n)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 2"
print(e)
f3.write(output + n)
pass
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 1"
print(e)
f3.write(output + n)
pass
# end = time()
# running_time = end - start
# print (running_time)
def get_medium(f_name,start_id):
# start = time.time()
# print(start)
func = "medium"
index = 0
print(func)
f_in = folder + "/data/" + f_name + ".tsv"
f_out = folder + "/data/" + f_name + "_" + func + "-output.tsv"
f_err = folder + "/data/" + f_name + "_" + func + "-errors.tsv"
with open(f_in, "r") as f1:
with open(f_out, "a") as f2:
with open(f_err, "a") as f3:
tsv_file = csv.reader(f1, delimiter="\t")
for file in tsv_file:
index += 1
file_id = file[0]
file_name = file[1]
# print(file_name)
if (index >= start_id):
try:
url = commons_page + file_name
html = urlopen(url)
bsObj = BeautifulSoup(html,"html.parser")
print(file_id)
with open(f_out, "a") as f:
try:
raw_data = bsObj.find("div",{"class":"commons-file-information-table"})
output = str(file_id) + t + file_name + t
# print(output)
f2.write(output)
except Exception as e:
output = str(file_id) + t + commons_page + file_name + t + "error_1"
f3.write(output)
pass
try:
row = raw_data.findAll("tr")[1]
val = row.findAll("td")[0].get_text().replace(n,s)
value_1 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_1
print(value_1)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[2]
val = row.findAll("td")[0].get_text().replace(n,s)
value_2 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_2
print(value_2)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[3]
val = row.findAll("td")[0].get_text().replace(n,s)
value_3 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_3
print(value_3)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[4]
val = row.findAll("td")[0].get_text().replace(n,s)
value_4 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_4
print(value_4)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[5]
val = row.findAll("td")[0].get_text().replace(n,s)
value_5 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_5
print(value_5)
f2.write(output + n)
except Exception as e:
pass
except Exception as e:
output = str(file_id) + t + commons_page + file_name + t + "error 1"
print(e)
# f3.write(output + n)
pass
# end = time()
# running_time = end - start
# print (running_time)
# -----------------------------------
# Launch scripts
# get_img_size_analogic("test",48766);
get_medium("file_medium",0);
|
gpl-3.0
| -1,088,663,877,356,968,000
| 26.09322
| 88
| 0.512043
| false
| 2.703975
| false
| false
| false
|
tom-mi/pyrad
|
pyrad/server.py
|
1
|
9027
|
# server.py
#
# Copyright 2003-2004,2007 Wichert Akkerman <wichert@wiggy.net>
import select
import socket
from pyrad import host
from pyrad import packet
import logging
logger = logging.getLogger('pyrad')
class RemoteHost:
"""Remote RADIUS capable host we can talk to.
"""
def __init__(self, address, secret, name, authport=1812, acctport=1813):
"""Constructor.
:param address: IP address
:type address: string
:param secret: RADIUS secret
:type secret: string
:param name: short name (used for logging only)
:type name: string
:param authport: port used for authentication packets
:type authport: integer
:param acctport: port used for accounting packets
:type acctport: integer
"""
self.address = address
self.secret = secret
self.authport = authport
self.acctport = acctport
self.name = name
class ServerPacketError(Exception):
"""Exception class for bogus packets.
ServerPacketError exceptions are only used inside the Server class to
abort processing of a packet.
"""
class Server(host.Host):
"""Basic RADIUS server.
This class implements the basics of a RADIUS server. It takes care
of the details of receiving and decoding requests; processing of
the requests should be done by overloading the appropriate methods
in derived classes.
:ivar hosts: hosts who are allowed to talk to us
:type hosts: dictionary of Host class instances
:ivar _poll: poll object for network sockets
:type _poll: select.poll class instance
:ivar _fdmap: map of filedescriptors to network sockets
:type _fdmap: dictionary
:cvar MaxPacketSize: maximum size of a RADIUS packet
:type MaxPacketSize: integer
"""
MaxPacketSize = 8192
def __init__(self, addresses=[], authport=1812, acctport=1813, hosts=None,
dict=None, auto_crypt=False):
"""Constructor.
:param addresses: IP addresses to listen on
:type addresses: sequence of strings
:param authport: port to listen on for authentication packets
:type authport: integer
:param acctport: port to listen on for accounting packets
:type acctport: integer
:param hosts: hosts who we can talk to
:type hosts: dictionary mapping IP to RemoteHost class instances
:param dict: RADIUS dictionary to use
:type dict: Dictionary class instance
"""
host.Host.__init__(self, authport, acctport, dict)
if hosts is None:
self.hosts = {}
else:
self.hosts = hosts
self.authfds = []
self.acctfds = []
for addr in addresses:
self.BindToAddress(addr)
self.auto_crypt = auto_crypt
self.running = True
def BindToAddress(self, addr):
"""Add an address to listen to.
An empty string indicates you want to listen on all addresses.
:param addr: IP address to listen on
:type addr: string
"""
authfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
authfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
authfd.bind((addr, self.authport))
acctfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
acctfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
acctfd.bind((addr, self.acctport))
self.authfds.append(authfd)
self.acctfds.append(acctfd)
def HandleAuthPacket(self, pkt):
"""Authentication packet handler.
This is an empty function that is called when a valid
authentication packet has been received. It can be overridden in
derived classes to add custom behaviour.
:param pkt: packet to process
:type pkt: Packet class instance
"""
def HandleAcctPacket(self, pkt):
"""Accounting packet handler.
This is an empty function that is called when a valid
accounting packet has been received. It can be overridden in
derived classes to add custom behaviour.
:param pkt: packet to process
:type pkt: Packet class instance
"""
def _HandleAuthPacket(self, pkt):
"""Process a packet received on the authentication port.
If this packet should be dropped instead of processed a
ServerPacketError exception should be raised. The main loop will
drop the packet and log the reason.
:param pkt: packet to process
:type pkt: Packet class instance
"""
if pkt.source[0] not in self.hosts:
msg = 'Received packet from unknown host ({})'.format(pkt.source[0])
raise ServerPacketError(msg)
pkt.secret = self.hosts[pkt.source[0]].secret
if pkt.code != packet.AccessRequest:
raise ServerPacketError(
'Received non-authentication packet on authentication port')
self.HandleAuthPacket(pkt)
def _HandleAcctPacket(self, pkt):
"""Process a packet received on the accounting port.
If this packet should be dropped instead of processed a
ServerPacketError exception should be raised. The main loop will
drop the packet and log the reason.
:param pkt: packet to process
:type pkt: Packet class instance
"""
if pkt.source[0] not in self.hosts:
raise ServerPacketError('Received packet from unknown host')
pkt.secret = self.hosts[pkt.source[0]].secret
if pkt.code not in [packet.AccountingRequest,
packet.AccountingResponse]:
raise ServerPacketError(
'Received non-accounting packet on accounting port')
self.HandleAcctPacket(pkt)
def _GrabPacket(self, pktgen, fd):
"""Read a packet from a network connection.
This method assumes there is data waiting to be read.
:param fd: socket to read packet from
:type fd: socket class instance
:return: RADIUS packet
:rtype: Packet class instance
"""
(data, source) = fd.recvfrom(self.MaxPacketSize)
pkt = pktgen(data)
pkt.source = source
pkt.fd = fd
return pkt
def _PrepareSockets(self):
"""Prepare all sockets to receive packets.
"""
for fd in self.authfds + self.acctfds:
self._fdmap[fd.fileno()] = fd
self._poll.register(fd.fileno(),
select.POLLIN | select.POLLPRI | select.POLLERR)
self._realauthfds = list(map(lambda x: x.fileno(), self.authfds))
self._realacctfds = list(map(lambda x: x.fileno(), self.acctfds))
def CreateReplyPacket(self, pkt, **attributes):
"""Create a reply packet.
Create a new packet which can be returned as a reply to a received
packet.
:param pkt: original packet
:type pkt: Packet instance
"""
reply = pkt.CreateReply(**attributes)
reply.source = pkt.source
return reply
def _ProcessInput(self, fd):
"""Process available data.
If this packet should be dropped instead of processed a
PacketError exception should be raised. The main loop will
drop the packet and log the reason.
This function calls either HandleAuthPacket() or
HandleAcctPacket() depending on which socket is being
processed.
:param fd: socket to read packet from
:type fd: socket class instance
"""
if fd.fileno() in self._realauthfds:
pkt = self._GrabPacket(lambda data, s=self:
s.CreateAuthPacket(packet=data,
auto_crypt=self.auto_crypt), fd)
self._HandleAuthPacket(pkt)
else:
pkt = self._GrabPacket(lambda data, s=self:
s.CreateAcctPacket(packet=data), fd)
self._HandleAcctPacket(pkt)
def Stop(self):
self.running = False
def Run(self):
"""Main loop.
This method is the main loop for a RADIUS server. It waits
for packets to arrive via the network and calls other methods
to process them.
"""
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
while self.running:
for (fd, event) in self._poll.poll(10):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except ServerPacketError as err:
logger.info('Dropping packet: ' + str(err))
except packet.PacketError as err:
logger.info('Received a broken packet: ' + str(err))
else:
logger.error('Unexpected event in server main loop')
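# Minimal usage sketch (illustrative only; assumes the usual pyrad.dictionary
# and pyrad.packet helpers, and that every request should simply be accepted):
#
#   class AcceptingServer(Server):
#       def HandleAuthPacket(self, pkt):
#           reply = self.CreateReplyPacket(pkt)
#           reply.code = packet.AccessAccept
#           self.SendReplyPacket(pkt.fd, reply)
#
#   srv = AcceptingServer(addresses=['127.0.0.1'],
#                         dict=dictionary.Dictionary('dictionary'),
#                         hosts={'127.0.0.1': RemoteHost('127.0.0.1',
#                                                        b'secret', 'localhost')})
#   srv.Run()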
|
bsd-3-clause
| -1,549,894,543,043,070,200
| 34.4
| 80
| 0.611056
| false
| 4.403415
| false
| false
| false
|
unioslo/cerebrum
|
contrib/exchange/exchange_group_state_verification.py
|
1
|
22909
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Script that checks the state of dist.groups between Cerebrum and Exchange.
This is done by:
- Pulling out all related attributes from Exchange, via LDAP.
- Pulling out all related information from Cerebrum, via API.
- Compare the two above.
- Send a report by mail/file.
"""
import argparse
import itertools
import logging
import pickle
import time
import ldap
from six import text_type
import cereconf
import eventconf
import Cerebrum.logutils
import Cerebrum.logutils.options
from Cerebrum import Utils
from Cerebrum.Utils import Factory
from Cerebrum.Utils import read_password
from Cerebrum.modules.Email import EmailAddress
from Cerebrum.modules.exchange.CerebrumUtils import CerebrumUtils
from Cerebrum.utils.email import sendmail
from Cerebrum.utils.ldaputils import decode_attrs
logger = logging.getLogger(__name__)
def text_decoder(encoding, allow_none=True):
def to_text(value):
if allow_none and value is None:
return None
if isinstance(value, bytes):
return value.decode(encoding)
return text_type(value)
return to_text
class StateChecker(object):
"""Wrapper class for state-checking functions.
The StateChecker class wraps all the functions we need in order to
verify and report deviances between Cerebrum and Exchange.
"""
# Connect params
LDAP_RETRY_DELAY = 60
LDAP_RETRY_MAX = 5
# Search and result params
LDAP_COM_DELAY = 30
LDAP_COM_MAX = 3
def __init__(self, conf):
"""Initzialize a new instance of out state-checker.
:param logger logger: The logger to use.
:param dict conf: Our StateCheckers configuration.
"""
self.db = Factory.get('Database')(client_encoding='UTF-8')
self.co = Factory.get('Constants')(self.db)
self.dg = Factory.get('DistributionGroup')(self.db)
self.ac = Factory.get('Account')(self.db)
self.gr = Factory.get('Group')(self.db)
self.et = Factory.get('EmailTarget')(self.db)
self.ea = EmailAddress(self.db)
self.ut = CerebrumUtils()
self.config = conf
self._ldap_page_size = 1000
def u(self, db_value):
""" Decode bytestring from database. """
if isinstance(db_value, bytes):
return db_value.decode(self.db.encoding)
return text_type(db_value)
def init_ldap(self):
"""Initzialize LDAP connection."""
self.ldap_srv = ldap.ldapobject.ReconnectLDAPObject(
'%s://%s/' % (self.config['ldap_proto'],
self.config['ldap_server']),
retry_max=self.LDAP_RETRY_MAX,
retry_delay=self.LDAP_RETRY_DELAY)
usr = self.config['ldap_user'].split('\\')[1]
self.ldap_srv.bind_s(
self.config['ldap_user'],
read_password(usr, self.config['ldap_server']))
self.ldap_lc = ldap.controls.SimplePagedResultsControl(
True, self._ldap_page_size, '')
def _searcher(self, ou, scope, attrs, ctrls):
""" Perform ldap.search(), but retry in the event of an error.
This wraps the search with error handling, so that the search is
repeated with a delay between attempts.
"""
for attempt in itertools.count(1):
try:
return self.ldap_srv.search_ext(
ou, scope, attrlist=attrs, serverctrls=ctrls)
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in _searcher on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
def _recvr(self, msgid):
""" Perform ldap.result3(), but retry in the event of an error.
This wraps the result fetching with error handling, so that the fetch
is repeated with a delay between attempts.
It also decodes all attributes and attribute text values.
"""
for attempt in itertools.count(1):
try:
# return self.ldap_srv.result3(msgid)
rtype, rdata, rmsgid, sc = self.ldap_srv.result3(msgid)
return rtype, decode_attrs(rdata), rmsgid, sc
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in _recvr on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
# This is a paging searcher, that should be used for large amounts of data
def search(self, ou, attrs, scope=ldap.SCOPE_SUBTREE):
"""Wrapper for the search- and result-calls.
Implements paged searching.
:param str ou: The OU to search in.
:param list attrs: The attributes to fetch.
:param int scope: Our search scope, default is subtree.
:rtype: list
:return: List of objects.
"""
# Implementing paging, taken from
# http://www.novell.com/coolsolutions/tip/18274.html
msgid = self._searcher(ou, scope, attrs, [self.ldap_lc])
data = []
ctrltype = ldap.controls.SimplePagedResultsControl.controlType
while True:
time.sleep(1)
rtype, rdata, rmsgid, sc = self._recvr(msgid)
data.extend(rdata)
pctrls = [c for c in sc if c.controlType == ctrltype]
if pctrls:
cookie = pctrls[0].cookie
if cookie:
self.ldap_lc.cookie = cookie
time.sleep(1)
msgid = self._searcher(ou, scope, attrs, [self.ldap_lc])
else:
break
else:
logger.warn('Server ignores RFC 2696 control.')
break
# Skip the OU itself, only return objects in the OU
return data[1:]
# This search wrapper should be used for fetching members
def member_searcher(self, dn, scope, attrs):
"""Utility method for searching for group members.
:param str dn: The groups distinguished name.
:param int scope: Which scope to search by, should be BASE.
:param list attrs: A list of attributes to fetch.
:rtype: tuple
:return: The return-type and the result.
"""
# Wrapping the search, try three times
for attempt in itertools.count(1):
try:
# Search
msgid = self.ldap_srv.search(dn, scope, attrlist=attrs)
# Fetch
rtype, r = self.ldap_srv.result(msgid)
return rtype, r
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in member_searcher on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
    # We need a special function to pull out all the members of a group,
    # since AD LDAP forces us to fetch them in ranges (slices).
def collect_members(self, dn):
"""Fetch a groups members.
This method picks out members in slices, since AD LDAP won't give us
more than 1500 users at a time. If the range-part of the attribute name
ends with a star, we know that we need to look for more members...
:param str dn: The groups distinguished name.
:rtype: list
:return: A list of the members.
"""
        # We are searching through a range; 0 is the start point.
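        # Illustration of the range progression (editor's note, slice sizes
        # assumed for the example):
        #   request 'member;range=0-*'    -> reply key 'member;range=0-1499'
        #   request 'member;range=1500-*' -> reply key 'member;range=1500-2999'
        #   ...
        #   request 'member;range=7500-*' -> reply key 'member;range=7500-*',
        #   i.e. the key contains '*', so this is the final slice.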
low = str(0)
members = []
end = False
while not end:
# * means that we search for as many attributes as possible, from
# the start point defined by the low-param
attr = ['member;range=%s-*' % low]
# Search'n fetch
time.sleep(1) # Be polite
rtype, r = self.member_searcher(dn, ldap.SCOPE_BASE, attr)
            # If the result has no attributes, the group has no members;
            # break out.
if not r[0][1]:
end = True
break
# Dig out the data
r = r[0][1]
# Extract key
key = r.keys()[0]
# Store members
members.extend(r[key])
# If so, we have reached the end of the range
# (i.e. key is 'member;range=7500-*')
if '*' in key:
end = True
# Extract the new start point from the key
# (i.e. key is 'member;range=0-1499')
else:
low = str(int(key.split('-')[-1]) + 1)
return members
def close(self):
"""Close the connection to the LDAP server."""
self.ldap_srv.unbind_s()
###
# Group related fetching & comparison
###
def collect_exchange_group_info(self, group_ou):
"""Collect group-information from Exchange, via LDAP.
:param str group_ou: The OrganizationalUnit to search for groups.
:rtype: dict
:return: A dict with the group attributes. The key is the group name.
"""
attrs = ['displayName',
'info',
'proxyAddresses',
'msExchHideFromAddressLists']
r = self.search(group_ou, attrs)
ret = {}
for cn, data in r:
tmp = {}
name = cn[3:].split(',')[0]
for key in data:
if key == 'info':
tmp[u'Description'] = data[key][0]
elif key == 'displayName':
tmp[u'DisplayName'] = data[key][0]
elif key == 'proxyAddresses':
addrs = []
for addr in data[key]:
if addr.startswith('SMTP:'):
tmp[u'Primary'] = addr[5:]
# TODO: Correct var?
if (cereconf.EXCHANGE_DEFAULT_ADDRESS_PLACEHOLDER not
in addr):
addrs.append(addr[5:])
tmp[u'Aliases'] = sorted(addrs)
elif key == 'managedBy':
tmp_man = data[key][0][3:].split(',')[0]
if tmp_man == 'Default group moderator':
tmp_man = u'groupadmin'
tmp[u'ManagedBy'] = [tmp_man]
# Skip reporting memberships for roomlists, since we don't manage
# those memberships.
# TODO: Generalize this
if name.startswith('rom-'):
tmp['Members'] = []
else:
                # Extract the member names (CN) from the member DNs.
tmp['Members'] = sorted([m[3:].split(',')[0] for m in
self.collect_members(cn)])
            # A missing attribute means that the value is false.
if 'msExchHideFromAddressLists' in data:
tmp_key = 'msExchHideFromAddressLists'
tmp[u'HiddenFromAddressListsEnabled'] = (
True if data[tmp_key][0] == 'TRUE' else False)
else:
tmp[u'HiddenFromAddressListsEnabled'] = False
ret[name] = tmp
return ret
def collect_cerebrum_group_info(self, mb_spread, ad_spread):
"""Collect distgroup related information from Cerebrum.
:param int/str mb_spread: Spread of mailboxes in exchange.
:param int/str ad_spread: Spread of accounts in AD.
:rtype: dict
:return: A dict of users attributes. Uname is key.
"""
mb_spread = self.co.Spread(mb_spread)
ad_spread = self.co.Spread(ad_spread)
u = text_decoder(self.db.encoding)
def _true_or_false(val):
# Yes, we know...
if val == 'T':
return True
elif val == 'F':
return False
else:
return None
tmp = {}
for dg in self.dg.list_distribution_groups():
self.dg.clear()
self.dg.find(dg['group_id'])
roomlist = _true_or_false(self.dg.roomlist)
data = self.dg.get_distgroup_attributes_and_targetdata(
roomlist=roomlist)
tmp[u(self.dg.group_name)] = {
u'Description': u(self.dg.description),
u'DisplayName': u(data['displayname']),
}
if not roomlist:
# Split up the moderated by field, and resolve group members
# from groups if there are groups in the moderated by field!
tmp[u(self.dg.group_name)].update({
u'HiddenFromAddressListsEnabled':
_true_or_false(data['hidden']),
u'Primary': u(data['primary']),
u'Aliases': [u(v) for v in sorted(data['aliases'])]
})
# Collect members
membs_unfiltered = self.ut.get_group_members(
self.dg.entity_id,
spread=mb_spread,
filter_spread=ad_spread
)
members = [u(member['name']) for member in membs_unfiltered]
tmp[u(self.dg.group_name)].update({u'Members': sorted(members)})
return tmp
def compare_group_state(self, ex_group_info, cere_group_info, state,
config):
"""Compare the information fetched from Cerebrum and Exchange.
This method produces a dict with the state between the systems,
and a report that will be sent to the appropriate target system
administrators.
        :param dict ex_group_info: The group state in Exchange.
        :param dict cere_group_info: The group state in Cerebrum.
:param dict state: The previous state generated by this method.
:param dict config: Configuration of reporting delays for various
attributes.
:rtype: tuple
:return: A tuple consisting of the new difference-state and a
human-readable report of differences.
"""
s_ce_keys = set(cere_group_info.keys())
s_ex_keys = set(ex_group_info.keys())
diff_group = {}
diff_stale = {}
diff_new = {}
##
# Populate some structures with information we need
# Groups in Exchange, but not in Cerebrum
stale_keys = list(s_ex_keys - s_ce_keys)
for ident in stale_keys:
if state and ident in state['stale_group']:
diff_stale[ident] = state['stale_group'][ident]
else:
diff_stale[ident] = time.time()
# Groups in Cerebrum, but not in Exchange
new_keys = list(s_ce_keys - s_ex_keys)
for ident in new_keys:
if state and ident in state['new_group']:
diff_new[ident] = state['new_group'][ident]
else:
diff_new[ident] = time.time()
        # Check groups that exist in both Cerebrum and Exchange for
        # differences (& is set intersection, in case you wondered). If an
        # attribute is not in its desired state in both this and the last run,
        # save the timestamp from the last run. This is used for calculating
        # when we should nag someone about attributes not being in sync.
for key in s_ex_keys & s_ce_keys:
for attr in cere_group_info[key]:
tmp = {}
if state and key in state['group'] and \
attr in state['group'][key]:
t_0 = state['group'][key][attr][u'Time']
else:
t_0 = time.time()
if attr not in ex_group_info[key]:
tmp = {
u'Exchange': None,
u'Cerebrum': cere_group_info[key][attr],
u'Time': t_0
}
elif cere_group_info[key][attr] != ex_group_info[key][attr]:
tmp = {
u'Exchange': ex_group_info[key][attr],
u'Cerebrum': cere_group_info[key][attr],
u'Time': t_0
}
if tmp:
diff_group.setdefault(key, {})[attr] = tmp
ret = {
'new_group': diff_new,
'stale_group': diff_stale,
'group': diff_group,
}
if not state:
return ret, []
now = time.time()
        # By now, we have three different dicts. Loop through them and check
        # whether we should report them.
report = ['\n\n# Group Attribute Since Cerebrum_value:Exchange_value']
# Report attribute mismatches for groups
for key in diff_group:
for attr in diff_group[key]:
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
if diff_group[key][attr][u'Time'] < now - delta:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_group[key][attr][u'Time']))
if attr in (u'Aliases', u'Members',):
                        # We report only the difference for these types, for
                        # readability.
s_ce_attr = set(diff_group[key][attr][u'Cerebrum'])
try:
s_ex_attr = set(diff_group[key][attr][u'Exchange'])
except TypeError:
s_ex_attr = set([])
new_attr = list(s_ce_attr - s_ex_attr)
stale_attr = list(s_ex_attr - s_ce_attr)
if new_attr == stale_attr:
continue
tmp = u'%-10s %-30s %s +%s:-%s' % (key, attr, t,
str(new_attr),
str(stale_attr))
else:
tmp = u'%-10s %-30s %s %s:%s' % (
key, attr, t,
repr(diff_group[key][attr][u'Cerebrum']),
repr(diff_group[key][attr][u'Exchange']))
report += [tmp]
# Report uncreated groups
report += ['\n# Uncreated groups (uname, time)']
attr = 'UncreatedGroup'
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
for key in diff_new:
if diff_new[key] < now - delta:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_new[key]))
report += [u'%-10s uncreated_group %s' % (key, t)]
# Report stale groups
report += ['\n# Stale groups (uname, time)']
attr = 'StaleGroup'
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
for key in diff_stale:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_stale[key]))
if diff_stale[key] < now - delta:
report += [u'%-10s stale_group %s' % (key, t)]
return ret, report
def eventconf_type(value):
try:
return eventconf.CONFIG[value]
except KeyError as e:
raise ValueError(e)
def main(inargs=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--type',
dest='config',
type=eventconf_type,
required=True,
help="Sync type (a valid entry in eventconf.CONFIG)")
parser.add_argument(
'-f', '--file',
dest='state',
required=True,
help="read and write state to %(metavar)s")
parser.add_argument(
'-m', '--mail',
help="Send reports to %(metavar)s")
parser.add_argument(
'-s', '--sender',
help="Send reports from %(metavar)s")
parser.add_argument(
'-r', '--report-file',
dest='report',
help="Write the report to %(metavar)s")
Cerebrum.logutils.options.install_subparser(parser)
args = parser.parse_args(inargs)
if bool(args.mail) ^ bool(args.sender):
raise ValueError("Must give both mail and sender")
Cerebrum.logutils.autoconf('cronjob', args)
attr_config = args.config['state_check_conf']
group_ou = args.config['group_ou']
try:
with open(args.state, 'r') as f:
state = pickle.load(f)
except IOError:
logger.warn('No existing state file %s', args.state)
state = None
sc = StateChecker(args.config)
# Collect group info from Cerebrum and Exchange
sc.init_ldap()
ex_group_info = sc.collect_exchange_group_info(group_ou)
sc.close()
cere_group_info = sc.collect_cerebrum_group_info(
args.config['mailbox_spread'],
args.config['ad_spread'])
# Compare group state
new_state, report = sc.compare_group_state(ex_group_info,
cere_group_info,
state,
attr_config)
try:
rep = u'\n'.join(report)
except UnicodeError as e:
logger.warn('Bytestring data in report: %r', e)
tmp = []
for x in report:
tmp.append(x.decode('UTF-8'))
rep = u'\n'.join(tmp)
# Send a report by mail
if args.mail and args.sender:
sendmail(args.mail, args.sender,
'Exchange group state report',
rep.encode('utf-8'))
# Write report to file
if args.report:
with open(args.report, 'w') as f:
f.write(rep.encode('utf-8'))
with open(args.state, 'w') as f:
pickle.dump(new_state, f)
if __name__ == '__main__':
main()
|
gpl-2.0
| 7,390,959,573,923,553,000
| 35.713141
| 79
| 0.533677
| false
| 4.118102
| true
| false
| false
|
domeger/SplunkTAforPuppetEnterprise
|
bin/puppet_enterprise_metrics.py
|
1
|
4586
|
import splunktaforpuppetenterprise_declare
import os
import sys
import time
import datetime
import json
import modinput_wrapper.base_modinput
from solnlib.packages.splunklib import modularinput as smi
import input_module_puppet_enterprise_metrics as input_module
bin_dir = os.path.basename(__file__)
'''
Do not edit this file!!!
This file is generated by Add-on builder automatically.
Add your modular input logic to file input_module_puppet_enterprise_metrics.py
'''
class ModInputpuppet_enterprise_metrics(modinput_wrapper.base_modinput.BaseModInput):
def __init__(self):
if 'use_single_instance_mode' in dir(input_module):
use_single_instance = input_module.use_single_instance_mode()
else:
use_single_instance = False
super(ModInputpuppet_enterprise_metrics, self).__init__("splunktaforpuppetenterprise", "puppet_enterprise_metrics", use_single_instance)
self.global_checkbox_fields = None
def get_scheme(self):
"""overloaded splunklib modularinput method"""
scheme = super(ModInputpuppet_enterprise_metrics, self).get_scheme()
scheme.title = ("Puppet Enterprise Metrics")
scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.")
scheme.use_external_validation = True
scheme.streaming_mode_xml = True
scheme.add_argument(smi.Argument("name", title="Name",
description="",
required_on_create=True))
"""
For customized inputs, hard code the arguments here to hide argument detail from users.
For other input types, arguments should be get from input_module. Defining new input types could be easier.
"""
scheme.add_argument(smi.Argument("token_", title="Token:",
description="curl -k -X POST -H \'Content-Type: application/json\' -d \'{\"login\": \"\", \"password\": \"\",\"lifetime\": \"9y\" }\' https://$:4433/rbac-api/v1/auth/token",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("puppet_enterprise_server_", title="Puppet Enterprise Server:",
description="Put in your FQDN of your Puppet Enterprise Server so the links backs on the dashboards work correctly.",
required_on_create=False,
required_on_edit=False))
scheme.add_argument(smi.Argument("server_", title="Server:",
description="Input your Puppet Enterprise Server address.",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("port_", title="Port:",
description="Input your Puppet Enterprise DB Port (HTTPS 8081, HTTP: 8080)",
required_on_create=True,
required_on_edit=False))
return scheme
def get_app_name(self):
return "SplunkTAforPuppetEnterprise"
def validate_input(self, definition):
"""validate the input stanza"""
input_module.validate_input(self, definition)
def collect_events(self, ew):
"""write out the events"""
input_module.collect_events(self, ew)
def get_account_fields(self):
account_fields = []
return account_fields
def get_checkbox_fields(self):
checkbox_fields = []
return checkbox_fields
def get_global_checkbox_fields(self):
if self.global_checkbox_fields is None:
checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json')
try:
if os.path.isfile(checkbox_name_file):
with open(checkbox_name_file, 'r') as fp:
self.global_checkbox_fields = json.load(fp)
else:
self.global_checkbox_fields = []
except Exception as e:
self.log_error('Get exception when loading global checkbox parameter names. ' + str(e))
self.global_checkbox_fields = []
return self.global_checkbox_fields
if __name__ == "__main__":
exitcode = ModInputpuppet_enterprise_metrics().run(sys.argv)
sys.exit(exitcode)
|
apache-2.0
| -8,698,139,154,685,138,000
| 43.960784
| 214
| 0.586786
| false
| 4.448109
| false
| false
| false
|
EiNSTeiN-/deluge-gtk3
|
deluge/ui/gtkui/edittrackersdialog.py
|
1
|
9157
|
#
# edittrackersdialog.py
#
# Copyright (C) 2007, 2008 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from gi.repository import Gtk
import pkg_resources
import deluge.common
import common
from deluge.ui.client import client
import deluge.component as component
from deluge.log import LOG as log
class EditTrackersDialog:
def __init__(self, torrent_id, parent=None):
self.torrent_id = torrent_id
self.glade = Gtk.Builder()
self.glade.add_from_file(
pkg_resources.resource_filename("deluge.ui.gtkui",
"builder/edit_trackers.ui"))
self.dialog = self.glade.get_object("edit_trackers_dialog")
self.treeview = self.glade.get_object("tracker_treeview")
self.add_tracker_dialog = self.glade.get_object("add_tracker_dialog")
self.add_tracker_dialog.set_transient_for(self.dialog)
self.edit_tracker_entry = self.glade.get_object("edit_tracker_entry")
self.edit_tracker_entry.set_transient_for(self.dialog)
self.dialog.set_icon(common.get_deluge_icon())
if parent != None:
self.dialog.set_transient_for(parent)
# Connect the signals
self.glade.connect_signals({
"on_button_up_clicked": self.on_button_up_clicked,
"on_button_add_clicked": self.on_button_add_clicked,
"on_button_edit_clicked": self.on_button_edit_clicked,
"on_button_edit_cancel_clicked": self.on_button_edit_cancel_clicked,
"on_button_edit_ok_clicked": self.on_button_edit_ok_clicked,
"on_button_remove_clicked": self.on_button_remove_clicked,
"on_button_down_clicked": self.on_button_down_clicked,
"on_button_ok_clicked": self.on_button_ok_clicked,
"on_button_cancel_clicked": self.on_button_cancel_clicked,
"on_button_add_ok_clicked": self.on_button_add_ok_clicked,
"on_button_add_cancel_clicked": self.on_button_add_cancel_clicked
})
# Create a liststore for tier, url
self.liststore = Gtk.ListStore(int, str)
# Create the columns
self.treeview.append_column(
Gtk.TreeViewColumn(_("Tier"), Gtk.CellRendererText(), text=0))
self.treeview.append_column(
Gtk.TreeViewColumn(_("Tracker"), Gtk.CellRendererText(), text=1))
self.treeview.set_model(self.liststore)
self.liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
def run(self):
        # Make sure we have a torrent_id... if not, just return
if self.torrent_id == None:
return
# Get the trackers for this torrent
session = component.get("SessionProxy")
session.get_torrent_status(self.torrent_id, ["trackers"]).addCallback(self._on_get_torrent_status)
client.force_call()
def _on_get_torrent_status(self, status):
"""Display trackers dialog"""
if 'trackers' in status:
for tracker in status["trackers"]:
self.add_tracker(tracker["tier"], tracker["url"])
self.dialog.show()
def add_tracker(self, tier, url):
"""Adds a tracker to the list"""
self.liststore.append([tier, url])
def get_selected(self):
"""Returns the selected tracker"""
return self.treeview.get_selection().get_selected()[1]
def on_button_add_clicked(self, widget):
log.debug("on_button_add_clicked")
# Show the add tracker dialog
self.add_tracker_dialog.show()
self.glade.get_object("textview_trackers").grab_focus()
def on_button_remove_clicked(self, widget):
log.debug("on_button_remove_clicked")
selected = self.get_selected()
if selected != None:
self.liststore.remove(selected)
def on_button_edit_clicked(self, widget):
"""edits an existing tracker"""
log.debug("on_button_edit_clicked")
selected = self.get_selected()
if selected:
tracker = self.liststore.get_value(selected, 1)
self.glade.get_object("entry_edit_tracker").set_text(tracker)
self.edit_tracker_entry.show()
self.glade.get_object("edit_tracker_entry").grab_focus()
def on_button_edit_cancel_clicked(self, widget):
log.debug("on_button_edit_cancel_clicked")
self.edit_tracker_entry.hide()
def on_button_edit_ok_clicked(self, widget):
log.debug("on_button_edit_ok_clicked")
selected = self.get_selected()
tracker = self.glade.get_object("entry_edit_tracker").get_text()
self.liststore.set_value(selected, 1, tracker)
self.edit_tracker_entry.hide()
def on_button_up_clicked(self, widget):
log.debug("on_button_up_clicked")
selected = self.get_selected()
num_rows = self.liststore.iter_n_children(None)
if selected != None and num_rows > 1:
tier = self.liststore.get_value(selected, 0)
if not tier > 0:
return
new_tier = tier - 1
# Now change the tier for this tracker
self.liststore.set_value(selected, 0, new_tier)
def on_button_down_clicked(self, widget):
log.debug("on_button_down_clicked")
selected = self.get_selected()
num_rows = self.liststore.iter_n_children(None)
if selected != None and num_rows > 1:
tier = self.liststore.get_value(selected, 0)
new_tier = tier + 1
# Now change the tier for this tracker
self.liststore.set_value(selected, 0, new_tier)
def on_button_ok_clicked(self, widget):
log.debug("on_button_ok_clicked")
self.trackers = []
def each(model, path, iter, data):
tracker = {}
tracker["tier"] = model.get_value(iter, 0)
tracker["url"] = model.get_value(iter, 1)
self.trackers.append(tracker)
self.liststore.foreach(each, None)
        # Set the torrent's trackers
client.core.set_torrent_trackers(self.torrent_id, self.trackers)
self.dialog.destroy()
def on_button_cancel_clicked(self, widget):
log.debug("on_button_cancel_clicked")
self.dialog.destroy()
def on_button_add_ok_clicked(self, widget):
log.debug("on_button_add_ok_clicked")
# Create a list of trackers from the textview widget
textview = self.glade.get_object("textview_trackers")
trackers = []
b = textview.get_buffer()
lines = b.get_text(b.get_start_iter(), b.get_end_iter()).strip().split("\n")
for l in lines:
if deluge.common.is_url(l):
trackers.append(l)
for tracker in trackers:
            # Figure out what tier number to use... it's going to be the highest + 1.
# Also check for duplicates
# Check if there are any entries
duplicate = False
highest_tier = -1
for row in self.liststore:
tier = row[0]
if tier > highest_tier:
highest_tier = tier
if tracker == row[1]:
duplicate = True
break
# If not a duplicate, then add it to the list
if not duplicate:
# Add the tracker to the list
self.add_tracker(highest_tier + 1, tracker)
# Clear the entry widget and hide the dialog
textview.get_buffer().set_text("")
self.add_tracker_dialog.hide()
def on_button_add_cancel_clicked(self, widget):
log.debug("on_button_add_cancel_clicked")
# Clear the entry widget and hide the dialog
b = Gtk.TextBuffer()
self.glade.get_object("textview_trackers").set_buffer(b)
self.add_tracker_dialog.hide()
|
gpl-3.0
| -8,834,094,978,799,285,000
| 38.640693
| 106
| 0.624331
| false
| 3.787014
| false
| false
| false
|
fsxfreak/club-suite
|
clubsuite/suite/views/view_budget.py
|
1
|
4194
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import View
from django.urls import reverse
from django.contrib import messages
from suite.models import Club, Division, Budget
from suite.forms import DivisionCreateForm, BudgetCreateForm
from guardian.shortcuts import get_perms
from django.core.exceptions import PermissionDenied
class Budget(UserPassesTestMixin, LoginRequiredMixin, View):
template_name = 'dashboard/budget.html'
division_form_class = DivisionCreateForm
budget_form_class = BudgetCreateForm
def test_func(self):
club = get_object_or_404(Club, pk=self.kwargs['club_id'])
if 'can_access_budget' not in get_perms(self.request.user, club):
raise PermissionDenied
return True
def generate_books(self, divs):
books = []
for div in divs:
budgets = div.budget_set.all()
total_budget = 0
for budget in budgets:
total_budget = total_budget + budget.planned
events = div.event_set.all()
total_expense = 0
for event in events:
total_expense = total_expense + event.event_cost
books.append({ 'division' : div, 'budgets' : budgets, 'events' : events,
'total_budget' : total_budget, 'total_expense' : total_expense })
return books
def get(self, request, club_id, *args, **kwargs):
club = Club.objects.get(pk=club_id)
budget_form = self.budget_form_class()
budget_form.fields['did'].queryset = Division.objects.filter(cid=club)
division_form = self.division_form_class
books = self.generate_books(club.division_set.all())
total_budget = 0
total_expense = 0
for book in books:
total_budget = total_budget + book['total_budget']
total_expense = total_expense + book['total_expense']
return render(request, self.template_name, { 'books': books,
'club': club,
'budget_form' : budget_form,
'division_form' : division_form,
'total_budget' : total_budget,
'total_expense' : total_expense})
def post(self, request, club_id, *args, **kwargs):
club = Club.objects.get(pk=club_id)
budget_form = self.budget_form_class()
budget_form.fields['did'].queryset = Division.objects.filter(cid=club)
division_form = self.division_form_class
if 'division' in request.POST:
division_form = self.division_form_class(request.POST)
if division_form.is_valid():
division = division_form.save()
division.cid = club
division.save()
messages.add_message(request, messages.SUCCESS, 'You Have Created a New Division!')
return HttpResponseRedirect(reverse('suite:budget', args=[club_id]))
else:
messages.add_message(request, messages.WARNING, 'Cannot Make Division with Same Name')
return HttpResponseRedirect(reverse('suite:budget', args=[club_id]))
elif 'budget' in request.POST:
budget_form = self.budget_form_class(request.POST)
if budget_form.is_valid():
budget = budget_form.save(commit=True)
budget.save()
else:
messages.add_message(request, messages.WARNING, 'Could not create budget.')
books = self.generate_books(club.division_set.all())
total_budget = 0
total_expense = 0
for book in books:
total_budget = total_budget + book['total_budget']
total_expense = total_expense + book['total_expense']
return render(request, self.template_name, { 'books' : books,
'club': club,
'budget_form' : budget_form,
'division_form' : division_form,
'total_budget' : total_budget,
'total_expense' : total_expense})
|
mit
| -6,044,569,809,767,433,000
| 38.196262
| 94
| 0.610157
| false
| 3.971591
| false
| false
| false
|
tarbell-project/tarbell
|
setup.py
|
1
|
2186
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
from tarbell import __VERSION__ as VERSION
APP_NAME = 'tarbell'
settings = dict()
# Publish Helper.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
settings.update(
name=APP_NAME,
version=VERSION,
author=u'Tarbell Project',
author_email='davideads@gmail.com',
url='http://github.com/tarbell-project/tarbell',
license='MIT',
description='A very simple content management system',
long_description="""Read the docs at http://tarbell.readthedocs.org
Tarbell makes it simple to put your work on the web, whether you’re a team of one or a dozen. With Tarbell, you can collaboratively build beautiful websites and publish them with ease.
Tarbell makes use of familiar, flexible tools to take the magic (and frustration) out of publishing to the web. Google spreadsheets handle content management, so changes to your stories are easy to make without touching a line of code. Step-by-step prompts help you set up and configure your project, so that publishing it is a breeze.""",
zip_safe=False,
packages=find_packages(),
include_package_data=True,
install_requires=[
"Flask==0.10.1",
"Frozen-Flask==0.11",
"Jinja2==2.7.3",
"Markdown==2.4.1",
"MarkupSafe==0.23",
"PyYAML==3.11",
"boto==2.48.0",
"clint==0.4.1",
"gnureadline>=6.3.3",
"google-api-python-client==1.6.2",
"keyring==5.3",
"oauth2client==1.5.2",
"python-dateutil>=2.2",
"requests==2.3.0",
"sh==1.09",
"six>=1.10.0",
"xlrd==0.9.3",
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'tarbell = tarbell.cli:main',
],
},
keywords=['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
],
)
setup(**settings)
|
bsd-3-clause
| -7,955,737,072,833,608,000
| 30.2
| 339
| 0.619505
| false
| 3.477707
| false
| true
| false
|
USGSDenverPychron/pychron
|
pychron/hardware/pychron_laser.py
|
1
|
1127
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pychron.hardware.fusions.fusions_logic_board import FusionsLogicBoard
# ============= standard library imports ========================
# ============= local library imports ==========================
class PychronLaser(FusionsLogicBoard):
pass
# ============= EOF =============================================
|
apache-2.0
| -491,999,547,571,266,600
| 40.740741
| 81
| 0.539485
| false
| 5.053812
| false
| false
| false
|
djmattyg007/bfinterpreter
|
bfinterpreter.py
|
1
|
6325
|
#!/usr/bin/python3
class Tape:
'''
A generic implementation of a record tape for a Turing Machine.
It's bounded on the left side and unbounded on the right side.
It stores only Python integers.
'''
def __init__(self):
self.reset()
def inc_val(self):
self.cells[self.pointer] += 1
def dec_val(self):
self.cells[self.pointer] -= 1
def move_right(self):
self.pointer += 1
if self.pointer == len(self.cells):
self.cells.append(0)
def move_left(self):
if self.pointer == 0:
raise Error("Cannot move past the start of the tape")
self.pointer -= 1
def get_val(self):
return self.cells[self.pointer]
def set_val(self, val):
self.cells[self.pointer] = val
def reset(self):
'''
Reset the tape to the same state it was in when it was
first initialised (ie. empty).
'''
self.cells = [0]
self.pointer = 0
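# Editor's sketch (not part of the original interpreter): a minimal exercise of
# the Tape API documented above. The function name is hypothetical and it is
# never called by the interpreter itself.
def _tape_demo():
    tape = Tape()
    tape.inc_val()                 # cell 0 becomes 1
    tape.move_right()              # the tape grows on demand when moving right
    tape.set_val(65)
    assert tape.get_val() == 65
    tape.move_left()
    assert tape.get_val() == 1
    tape.reset()                   # back to a single empty cell
    assert tape.cells == [0] and tape.pointer == 0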
class Brainfuck:
def __init__(self, tape, program, input_tape = None, allow_nested_loops = True, debug = False, eof_ord = 0):
self.tape = tape
self.program = program
self.input_tape = input_tape
self.pointer = 0
self.allow_nested_loops = allow_nested_loops
self.debug = debug
self.eof_ord = eof_ord
self.basic_ops = {
"+" : self.tape.inc_val,
"-" : self.tape.dec_val,
">" : self.tape.move_right,
"<" : self.tape.move_left,
}
def reset(self):
'''
Reset the interpreter to the same state it was in before
program execution commenced.
'''
self.tape.reset()
self.pointer = 0
if self.input_tape is not None:
self.input_tape.seek(0)
def read_input(self):
'''
Read a single character from the input tape supplied to
the interpreter.
'''
if self.input_tape is None:
return self.eof_ord
char = self.input_tape.read(1)
if char == "":
return self.eof_ord
else:
return ord(char)
def end_loop(self):
'''
Call when the start of a loop is encountered and nested loops
are supported. Move to the matching end-of-loop operator.
'''
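        # Example (editor's note): with program '[+[-]>]' and the pointer on
        # the opening '[' at index 0, the matching ']' is at index 6 and the
        # pointer ends up at index 7, one past it.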
nested_loop_count = 1
while nested_loop_count > 0:
self.pointer += 1
if self.program[self.pointer] == "]":
nested_loop_count -= 1
elif self.program[self.pointer] == "[":
nested_loop_count += 1
# Small optimisation: skip the end-of-loop operator
self.pointer += 1
def print_val(self):
'''
Print the unicode character represented by the byte value
stored at the current position on the recording tape.
'''
print(chr(self.tape.get_val()), end="")
def run_program(self):
if self.debug == True:
import time
loop_pointers = []
program_length = len(self.program)
while self.pointer < program_length:
char = self.program[self.pointer]
if self.debug == True:
debug_string = str(self.pointer) + "\t" + char + "\t"
if char in self.basic_ops.keys():
self.basic_ops[char]()
self.pointer += 1
elif char == ".":
self.print_val()
self.pointer += 1
elif char == "[":
if self.tape.get_val() == 0:
if self.allow_nested_loops == True:
self.end_loop()
else:
self.pointer = self.program.index("]", self.pointer) + 1
else:
loop_pointers.append(self.pointer)
self.pointer += 1
elif char == "]":
loop_start = loop_pointers.pop()
if self.tape.get_val() == 0:
self.pointer += 1
else:
self.pointer = loop_start
elif char == ",":
charval = self.read_input()
self.tape.set_val(charval)
self.pointer += 1
else:
self.pointer += 1
if self.debug == True:
debug_string += str(self.tape.pointer) + "\t" + str(self.tape.get_val())
if self.input_tape is not None:
debug_string += "\t" + str(self.input_tape.tell())
print("\n" + debug_string)
time.sleep(0.01)
if __name__ == "__main__":
import sys
def read_program_file(filename):
with open(filename, encoding="utf-8") as program_file:
return program_file.read()
def parse_bool(string):
'''
Turn a string representation of a boolean value into an actual
boolean-typed value.
'''
if string in ["true", "y", "yes", "1", "on"]:
return True
elif string in ["false", "n", "no", "0", "off"]:
return False
else:
return None
program = ""
input_tape = None
allow_nested_loops = True
debug = False
eof_ord = 0
dump_tape = False
args = sys.argv[1:]
for x, arg in enumerate(args):
if arg == "--program":
program = args[x + 1]
elif arg == "--program-file":
program = read_program_file(args[x + 1])
elif arg == "--input":
from io import StringIO
input_tape = StringIO(args[x + 1])
elif arg == "--input-file":
input_tape = open(args[x + 1], encoding="utf-8")
elif arg == "--nested-loops":
allow_nested_loops = parse_bool(args[x + 1])
elif arg == "--debug":
debug = parse_bool(args[x + 1])
elif arg == "--eof":
eof_ord = int(args[x + 1])
elif arg == "--dump-tape":
dump_tape = True
tape = Tape()
brainfuck = Brainfuck(tape, program, input_tape, allow_nested_loops, debug, eof_ord)
brainfuck.run_program()
if dump_tape == True:
print("\n" + str(tape.cells))
# Cleanup
if input_tape is not None:
input_tape.close()
|
unlicense
| 1,415,792,457,444,116,500
| 28.694836
| 112
| 0.502609
| false
| 3.943267
| false
| false
| false
|
eharney/cinder
|
cinder/cmd/volume_usage_audit.py
|
1
|
10138
|
#!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. If run on July 4th, it generates usages for
July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
from __future__ import print_function
import datetime
import iso8601
import sys
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
if CONF.start_time:
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
begin = begin.replace(tzinfo=iso8601.UTC)
end = end.replace(tzinfo=iso8601.UTC)
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
'end': end}
LOG.error(msg)
sys.exit(-1)
return begin, end
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
"""volume_ref notify usage"""
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.error("Exists volume notification failed: %s",
exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
"""snapshot_ref notify usage"""
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_snapshot_usage(
admin_context, snapshot_ref, 'exists', extra_info)
except Exception as exc_msg:
LOG.error("Exists snapshot notification failed: %s",
exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
"""backup_ref notify usage"""
try:
cinder.volume.utils.notify_about_backup_usage(
admin_context, backup_ref, 'exists', extra_info)
LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'backup_id': backup_ref.id,
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
begin, end, notify_about_usage, type_id_str, type_name):
_notify_usage(LOG, obj_ref, extra_info, admin_context)
if CONF.send_actions:
if begin < obj_ref.created_at < end:
_create_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
_delete_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
def main():
objects.register_all()
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
begin, end = _time_error(LOG, begin, end)
LOG.info("Starting volume usage audit")
LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
{"begin_period": begin, "end_period": end})
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
volumes = objects.VolumeList.get_all_active_by_window(admin_context,
begin,
end)
LOG.info("Found %d volumes", len(volumes))
for volume_ref in volumes:
_obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
admin_context, begin, end,
cinder.volume.utils.notify_about_volume_usage,
"volume_id", "volume")
snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d snapshots", len(snapshots))
for snapshot_ref in snapshots:
_obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_snapshot_usage,
"snapshot_id", "snapshot")
backups = objects.BackupList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d backups", len(backups))
for backup_ref in backups:
_obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_backup_usage,
"backup_id", "backup")
LOG.info("Volume usage audit completed")
|
apache-2.0
| -8,139,628,104,453,433,000
| 40.044534
| 78
| 0.577037
| false
| 3.890253
| false
| false
| false
|
Davideddu/python-liquidcrystal
|
setup.py
|
1
|
1093
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname), "r") as f:
return f.read()
setup(
name="liquidcrystal",
version="0.1",
author="Davide Depau",
author_email="apps@davideddu.org",
description="A Python port of Arduino's LiquidCrystal library that uses PyWiring to access an HD44780-based LCD "
"display through any supported I/O port.",
license="GPLv2",
keywords="lcd pywiring i2c gpio parallel serial liquidcrystal display",
url="http://github.com/Davidedd/python-liquidcrystal",
packages=['liquidcrystal'],
long_description=read('README.md'),
requires=["pywiring", "numpy"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
"Programming Language :: Python",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
)
|
gpl-2.0
| -760,723,976,487,144,700
| 31.147059
| 117
| 0.650503
| false
| 3.692568
| false
| false
| false
|
agrover/targetd
|
targetd/fs.py
|
1
|
10302
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2012, Andy Grover <agrover@redhat.com>
# Copyright 2013, Tony Asleson <tasleson@redhat.com>
#
# fs support using btrfs.
import os
import time
from targetd.nfs import Nfs, Export
from targetd.utils import invoke, TargetdError
# Notes:
#
# Users can configure one or more block pools (LVM volume groups) and zero or
# more file system mount points to be used as pools. At this time you have to
# specify a block pool for block operations and a file system mount point pool
# for FS operations. We could also use files on a file system for block storage
# and create file systems on top of LVM, but that is TBD.
#
# We are using btrfs to provide all the cool fast FS features. The user supplies
# a btrfs mount point and we create targetd_fs and targetd_ss subvolumes. Each
# time the user creates a file system we are creating a subvolume under fs.
# Each time a FS clone is made we create the clone under fs. For each snapshot
# (RO clone) we are creating a read only snapshot in
# <mount>/targetd_ss/<fsname>/<snapshot name>
#
# There may be better ways of utilizing btrfs.
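# Concrete example of the resulting layout (editor's illustration, assuming a
# pool mounted at /mnt/btrfs_pool):
#   /mnt/btrfs_pool/targetd_fs/<fs name>                  - writable subvolume
#   /mnt/btrfs_pool/targetd_ss/<fs name>/<snapshot name>  - read-only snapshot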
import logging as log
fs_path = "targetd_fs"
ss_path = "targetd_ss"
fs_cmd = 'btrfs'
pools = []
def initialize(config_dict):
global pools
pools = config_dict['fs_pools']
for pool in pools:
# Make sure we have the appropriate subvolumes available
try:
create_sub_volume(os.path.join(pool, fs_path))
create_sub_volume(os.path.join(pool, ss_path))
except TargetdError as e:
log.error('Unable to create required subvolumes {0}'.format(e))
raise
return dict(
fs_list=fs,
fs_destroy=fs_destroy,
fs_create=fs_create,
fs_clone=fs_clone,
ss_list=ss,
fs_snapshot=fs_snapshot,
fs_snapshot_delete=fs_snapshot_delete,
nfs_export_auth_list=nfs_export_auth_list,
nfs_export_list=nfs_export_list,
nfs_export_add=nfs_export_add,
nfs_export_remove=nfs_export_remove,
)
def create_sub_volume(p):
if not os.path.exists(p):
invoke([fs_cmd, 'subvolume', 'create', p])
def split_stdout(out):
"""
    Split the command output into a list of token lists, one per output line.
"""
strip_it = '<FS_TREE>/'
rc = []
for line in out.split('\n'):
elem = line.split(' ')
if len(elem) > 1:
tmp = []
for z in elem:
if z.startswith(strip_it):
tmp.append(z[len(strip_it):])
else:
tmp.append(z)
rc.append(tmp)
return rc
def fs_space_values(mount_point):
"""
Return a tuple (total, free) from the specified path
"""
st = os.statvfs(mount_point)
free = (st.f_bavail * st.f_frsize)
total = (st.f_blocks * st.f_frsize)
return total, free
def pool_check(pool_name):
"""
    pool_name *cannot* be trusted; functions taking a pool param must call
    this to ensure the passed-in pool name is one targetd has been
    configured to use.
"""
if pool_name not in pools:
raise TargetdError(-110, "Invalid filesystem pool")
def fs_create(req, pool_name, name, size_bytes):
pool_check(pool_name)
full_path = os.path.join(pool_name, fs_path, name)
if not os.path.exists(full_path):
invoke([fs_cmd, 'subvolume', 'create', full_path])
else:
raise TargetdError(-53, 'FS already exists')
def fs_snapshot(req, fs_uuid, dest_ss_name):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
if fs_ht:
source_path = os.path.join(fs_ht['pool'], fs_path, fs_ht['name'])
dest_base = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'])
dest_path = os.path.join(dest_base, dest_ss_name)
create_sub_volume(dest_base)
if os.path.exists(dest_path):
raise TargetdError(-53, "Snapshot already exists with that name")
invoke([fs_cmd, 'subvolume', 'snapshot', '-r', source_path, dest_path])
def fs_snapshot_delete(req, fs_uuid, ss_uuid):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
snapshot = _get_ss_by_uuid(req, fs_uuid, ss_uuid, fs_ht)
path = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'],
snapshot['name'])
fs_subvolume_delete(path)
def fs_subvolume_delete(path):
invoke([fs_cmd, 'subvolume', 'delete', path])
def fs_destroy(req, uuid):
    # Check to see if this file system has any read-only snapshots; if so,
    # delete them. The API requires an FS to list its RO copies, so we may
    # want to reconsider this decision.
fs_ht = _get_fs_by_uuid(req, uuid)
base_snapshot_dir = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'])
snapshots = ss(req, uuid)
for s in snapshots:
fs_subvolume_delete(os.path.join(base_snapshot_dir, s['name']))
if os.path.exists(base_snapshot_dir):
fs_subvolume_delete(base_snapshot_dir)
fs_subvolume_delete(os.path.join(fs_ht['pool'], fs_path, fs_ht['name']))
def fs_pools(req):
results = []
for pool in pools:
total, free = fs_space_values(pool)
results.append(dict(name=pool, size=total, free_size=free, type='fs'))
return results
def _invoke_retries(command, throw_exception):
# TODO take out this loop, used to handle bug in btrfs
# ERROR: Failed to lookup path for root 0 - No such file or directory
for i in range(0, 5):
result, out, err = invoke(command, False)
if result == 0:
return result, out, err
elif result == 19:
time.sleep(1)
continue
else:
raise TargetdError(-303, "Unexpected exit code %d" % result)
raise TargetdError(-303, "Unable to execute command after "
"multiple retries %s" % (str(command)))
def _fs_hash():
fs_list = {}
for pool in pools:
full_path = os.path.join(pool, fs_path)
result, out, err = _invoke_retries(
[fs_cmd, 'subvolume', 'list', '-ua', pool], False)
data = split_stdout(out)
if len(data):
(total, free) = fs_space_values(full_path)
for e in data:
sub_vol = e[10]
prefix = fs_path + os.path.sep
if sub_vol[:len(prefix)] == prefix:
key = os.path.join(pool, sub_vol)
fs_list[key] = dict(name=sub_vol[len(prefix):],
uuid=e[8],
total_space=total,
free_space=free,
pool=pool,
full_path=key)
return fs_list
def fs(req):
return list(_fs_hash().values())
def ss(req, fs_uuid, fs_cache=None):
snapshots = []
if fs_cache is None:
fs_cache = _get_fs_by_uuid(req, fs_uuid)
full_path = os.path.join(fs_cache['pool'], ss_path, fs_cache['name'])
if os.path.exists(full_path):
result, out, err = _invoke_retries([fs_cmd, 'subvolume', 'list', '-s',
full_path], False)
data = split_stdout(out)
if len(data):
for e in data:
ts = "%s %s" % (e[10], e[11])
time_epoch = int(time.mktime(
time.strptime(ts, '%Y-%m-%d %H:%M:%S')))
st = dict(name=e[-1], uuid=e[-3], timestamp=time_epoch)
snapshots.append(st)
return snapshots
def _get_fs_by_uuid(req, fs_uuid):
for f in fs(req):
if f['uuid'] == fs_uuid:
return f
def _get_ss_by_uuid(req, fs_uuid, ss_uuid, fs_ht=None):
if fs_ht is None:
fs_ht = _get_fs_by_uuid(req, fs_uuid)
for s in ss(req, fs_uuid, fs_ht):
if s['uuid'] == ss_uuid:
return s
def fs_clone(req, fs_uuid, dest_fs_name, snapshot_id):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
if not fs_ht:
raise TargetdError(-104, "fs_uuid not found")
if snapshot_id:
snapshot = _get_ss_by_uuid(req, fs_uuid, snapshot_id)
if not snapshot:
raise TargetdError(-112, "snapshot not found")
source = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'],
snapshot['name'])
dest = os.path.join(fs_ht['pool'], fs_path, dest_fs_name)
else:
source = os.path.join(fs_ht['pool'], fs_path, fs_ht['name'])
dest = os.path.join(fs_ht['pool'], fs_path, dest_fs_name)
if os.path.exists(dest):
raise TargetdError(-51, "Filesystem with that name exists")
invoke([fs_cmd, 'subvolume', 'snapshot', source, dest])
def nfs_export_auth_list(req):
return Nfs.security_options()
def nfs_export_list(req):
rc = []
exports = Nfs.exports()
for e in exports:
rc.append(dict(host=e.host, path=e.path, options=e.options_list()))
return rc
def nfs_export_add(req, host, path, export_path, options):
if export_path is not None:
raise TargetdError(-401, "separate export path not supported at "
"this time")
bit_opt = 0
key_opt = {}
for o in options:
if '=' in o:
k, v = o.split('=')
key_opt[k] = v
else:
bit_opt |= Export.bool_option[o]
Nfs.export_add(host, path, bit_opt, key_opt)
def nfs_export_remove(req, host, path):
found = False
for e in Nfs.exports():
if e.host == host and e.path == path:
Nfs.export_remove(e)
found = True
if not found:
raise TargetdError(
-400, "NFS export to remove not found %s:%s", (host, path))
|
gpl-3.0
| -551,828,838,198,015,300
| 28.603448
| 79
| 0.584158
| false
| 3.411258
| false
| false
| false
|
viranch/exodus
|
resources/lib/indexers/episodes.py
|
1
|
65108
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleantitle
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
import os,sys,re,json,zipfile,StringIO,urllib,urllib2,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
control.moderator()
class seasons:
def __init__(self):
self.list = []
self.lang = control.apiLanguage()['tvdb']
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_by_imdb = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s'
self.tvdb_by_query = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s'
self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
def get(self, tvshowtitle, year, imdb, tvdb, idx=True):
if control.window.getProperty('PseudoTVRunning') == 'True':
return episodes().get(tvshowtitle, year, imdb, tvdb)
if idx == True:
self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tvdb, self.lang)
self.seasonDirectory(self.list)
return self.list
else:
self.list = self.tvdb_list(tvshowtitle, year, imdb, tvdb, 'en')
return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tvdb, lang, limit=''):
try:
if imdb == '0':
url = self.imdb_by_query % (urllib.quote_plus(tvshowtitle), year)
imdb = client.request(url, timeout='10')
try: imdb = json.loads(imdb)['imdbID']
except: imdb = '0'
if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
if tvdb == '0' and not imdb == '0':
url = self.tvdb_by_imdb % imdb
result = client.request(url, timeout='10')
try: tvdb = client.parseDOM(result, 'seriesid')[0]
except: tvdb = '0'
try: name = client.parseDOM(result, 'SeriesName')[0]
except: name = '0'
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
if len(dupe) > 0: tvdb = str(dupe[0])
if tvdb == '': tvdb = '0'
if tvdb == '0':
url = self.tvdb_by_query % (urllib.quote_plus(tvshowtitle))
years = [str(year), str(int(year)+1), str(int(year)-1)]
tvdb = client.request(url, timeout='10')
tvdb = re.sub(r'[^\x00-\x7F]+', '', tvdb)
tvdb = client.replaceHTMLCodes(tvdb)
tvdb = client.parseDOM(tvdb, 'Series')
tvdb = [(x, client.parseDOM(x, 'SeriesName'), client.parseDOM(x, 'FirstAired')) for x in tvdb]
tvdb = [(x, x[1][0], x[2][0]) for x in tvdb if len(x[1]) > 0 and len(x[2]) > 0]
tvdb = [x for x in tvdb if cleantitle.get(tvshowtitle) == cleantitle.get(x[1])]
tvdb = [x[0][0] for x in tvdb if any(y in x[2] for y in years)][0]
tvdb = client.parseDOM(tvdb, 'seriesid')[0]
if tvdb == '': tvdb = '0'
except:
return
try:
if tvdb == '0': return
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
dupe = client.parseDOM(result, 'SeriesName')[0]
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
if len(dupe) > 0:
tvdb = str(dupe[0]).encode('utf-8')
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
if not lang == 'en':
url = self.tvdb_info_link % (tvdb, lang)
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result2 = zip.read('%s.xml' % lang)
zip.close()
else:
result2 = result
artwork = artwork.split('<Banner>')
artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
artwork = [i for i in artwork if not 'seasonswide' in re.findall('<BannerPath>(.+?)</BannerPath>', i)[0]]
result = result.split('<Episode>')
result2 = result2.split('<Episode>')
item = result[0] ; item2 = result2[0]
episodes = [i for i in result if '<EpisodeNumber>' in i]
episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
locals = [i for i in result2 if '<EpisodeNumber>' in i]
result = '' ; result2 = ''
if limit == '':
episodes = []
elif limit == '-1':
seasons = []
else:
episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
seasons = []
try: poster = client.parseDOM(item, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
try: status = client.parseDOM(item, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
try: studio = client.parseDOM(item, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: cast = client.parseDOM(item, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: label = client.parseDOM(item2, 'SeriesName')[0]
except: label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
except:
pass
for item in seasons:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
try: thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if thumb == '0': thumb = poster
self.list.append({'season': season, 'tvshowtitle': tvshowtitle, 'label': label, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
for item in episodes:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try:
local = client.parseDOM(item, 'id')[0]
local = [x for x in locals if '<id>%s</id>' % str(local) in x][0]
except:
local = item
label = client.parseDOM(local, 'EpisodeName')[0]
if label == '': label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: episodeplot = client.parseDOM(local, 'Overview')[0]
except: episodeplot = ''
if episodeplot == '': episodeplot = '0'
if episodeplot == '0': episodeplot = plot
episodeplot = client.replaceHTMLCodes(episodeplot)
try: episodeplot = episodeplot.encode('utf-8')
except: pass
self.list.append({'title': title, 'label': label, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': episodeplot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
return self.list
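    # Each dict appended by tvdb_list is a flat season record, roughly of the form
    # (field names taken from the append call above, values illustrative):
    #   {'season': '1', 'tvshowtitle': 'Show', 'label': 'Show', 'year': '2010',
    #    'premiered': '2010-01-01', 'status': 'Ended', 'studio': 'Network',
    #    'genre': 'Drama / Crime', 'duration': '60', 'rating': '8.0', 'votes': '100',
    #    'mpaa': 'TV-14', 'cast': [('Actor', '')], 'plot': '...', 'imdb': 'tt0000000',
    #    'tvdb': '12345', 'poster': 'http://...', 'banner': '...', 'fanart': '...',
    #    'thumb': '...'}
    # Missing values are kept as the string '0'; seasonDirectory below renders these
    # records as Kodi directory items.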
def seasonDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
try: indicators = playcount.getSeasonIndicators(items[0]['imdb'])
except: pass
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
labelMenu = control.lang(32055).encode('utf-8')
for i in items:
try:
label = '%s %s' % (labelMenu, i['season'])
systitle = sysname = urllib.quote_plus(i['tvshowtitle'])
imdb, tvdb, year, season = i['imdb'], i['tvdb'], i['year'], i['season']
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'tvshow'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'tvshowtitle': i['label']})
except: pass
try:
if season in indicators: meta.update({'playcount': 1, 'overlay': 7})
else: meta.update({'playcount': 0, 'overlay': 6})
except:
pass
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s' % (sysaddon, systitle, year, imdb, tvdb, season)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
cm.append((watchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=7)' % (sysaddon, systitle, imdb, tvdb, season)))
cm.append((unwatchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=6)' % (sysaddon, systitle, imdb, tvdb, season)))
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, sysname, tvdb)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb'], 'poster': i['thumb']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
else:
art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
try: control.property(syshandle, 'showplot', items[0]['plot'])
except: pass
control.content(syshandle, 'seasons')
control.directory(syshandle, cacheToDisc=True)
views.setView('seasons', {'skin.estuary': 55, 'skin.confluence': 500})
class episodes:
def __init__(self):
self.list = []
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.tvmaze_link = 'http://api.tvmaze.com'
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.lang = control.apiLanguage()['tvdb']
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
self.added_link = 'http://api.tvmaze.com/schedule'
self.mycalendar_link = 'http://api-v2launch.trakt.tv/calendars/my/shows/date[29]/60/'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/shows?limit=300'
self.progress_link = 'http://api-v2launch.trakt.tv/users/me/watched/shows'
self.hiddenprogress_link = 'http://api-v2launch.trakt.tv/users/hidden/progress_watched?limit=1000&type=show'
self.calendar_link = 'http://api.tvmaze.com/schedule?date=%s'
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
def get(self, tvshowtitle, year, imdb, tvdb, season=None, episode=None, idx=True):
try:
if idx == True:
if season == None and episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
elif episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, season)
else:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
num = [x for x,y in enumerate(self.list) if y['season'] == str(season) and y['episode'] == str(episode)][-1]
self.list = [y for x,y in enumerate(self.list) if x >= num]
self.episodeDirectory(self.list)
return self.list
else:
self.list = seasons().tvdb_list(tvshowtitle, year, imdb, tvdb, 'en', '-1')
return self.list
except:
pass
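    # Behaviour sketch for get() above (argument values are hypothetical examples):
    #   episodes().get(t, y, imdb, tvdb)                          -> every episode of the show
    #   episodes().get(t, y, imdb, tvdb, season='2')              -> only season 2
    #   episodes().get(t, y, imdb, tvdb, season='2', episode='5') -> everything from 2x05 onward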
def calendar(self, url):
try:
try: url = getattr(self, url + '_link')
except: pass
if self.trakt_link in url and url == self.progress_link:
self.blist = cache.get(self.trakt_progress_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_progress_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and url == self.mycalendar_link:
self.blist = cache.get(self.trakt_episodes_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_episodes_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and '/users/' in url:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
self.list = self.list[::-1]
elif self.trakt_link in url:
self.list = cache.get(self.trakt_list, 1, url, self.trakt_user)
elif self.tvmaze_link in url and url == self.added_link:
urls = [i['url'] for i in self.calendars(idx=False)][:5]
self.list = []
for url in urls:
self.list += cache.get(self.tvmaze_list, 720, url, True)
elif self.tvmaze_link in url:
self.list = cache.get(self.tvmaze_list, 1, url, False)
self.episodeDirectory(self.list)
return self.list
except:
pass
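    # Caching note for the progress/calendar branches above: the call with the long
    # timeout (720) is normally served from cache and seeds self.blist with previously
    # resolved items, while the 0-timeout call forces a fresh fetch for the list that
    # is actually rendered.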
def widget(self):
if trakt.getTraktIndicatorsInfo() == True:
setting = control.setting('tv.widget.alt')
else:
setting = control.setting('tv.widget')
if setting == '2':
self.calendar(self.progress_link)
elif setting == '3':
self.calendar(self.mycalendar_link)
else:
self.calendar(self.added_link)
def calendars(self, idx=True):
m = control.lang(32060).encode('utf-8').split('|')
try: months = [(m[0], 'January'), (m[1], 'February'), (m[2], 'March'), (m[3], 'April'), (m[4], 'May'), (m[5], 'June'), (m[6], 'July'), (m[7], 'August'), (m[8], 'September'), (m[9], 'October'), (m[10], 'November'), (m[11], 'December')]
except: months = []
d = control.lang(32061).encode('utf-8').split('|')
try: days = [(d[0], 'Monday'), (d[1], 'Tuesday'), (d[2], 'Wednesday'), (d[3], 'Thursday'), (d[4], 'Friday'), (d[5], 'Saturday'), (d[6], 'Sunday')]
except: days = []
for i in range(0, 30):
try:
name = (self.datetime - datetime.timedelta(days = i))
name = (control.lang(32062) % (name.strftime('%A'), name.strftime('%d %B'))).encode('utf-8')
for m in months: name = name.replace(m[1], m[0])
for d in days: name = name.replace(d[1], d[0])
try: name = name.encode('utf-8')
except: pass
url = self.calendar_link % (self.datetime - datetime.timedelta(days = i)).strftime('%Y-%m-%d')
self.list.append({'name': name, 'url': url, 'image': 'calendar.png', 'action': 'calendar'})
except:
pass
if idx == True: self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'calendar'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
itemlist = []
items = json.loads(result)
except:
return
for item in items:
try:
title = item['episode']['title']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = item['episode']['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
season = season.encode('utf-8')
episode = item['episode']['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
episode = episode.encode('utf-8')
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
premiered = item['episode']['first_aired']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
studio = item['show']['network']
if studio == None: studio = '0'
studio = studio.encode('utf-8')
genre = item['show']['genres']
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['show']['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['episode']['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['show']['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
mpaa = item['show']['certification']
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
plot = item['episode']['overview']
if plot == None or plot == '': plot = item['show']['overview']
if plot == None or plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': '0', 'thumb': '0'})
except:
pass
itemlist = itemlist[::-1]
return itemlist
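    # Shape of the Trakt payload consumed above, inferred from the field accesses
    # (illustrative only, not an authoritative API description):
    #   [{'episode': {'title', 'season', 'number', 'first_aired', 'rating', 'overview'},
    #     'show': {'title', 'year', 'ids': {'imdb', 'tvdb'}, 'network', 'genres',
    #              'runtime', 'votes', 'certification', 'overview'}}, ...]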
def trakt_progress_list(self, url, user, lang):
try:
url += '?extended=full'
result = trakt.getTrakt(url)
result = json.loads(result)
items = []
except:
return
for item in result:
try:
num_1 = 0
for i in range(0, len(item['seasons'])): num_1 += len(item['seasons'][i]['episodes'])
num_2 = int(item['show']['aired_episodes'])
if num_1 >= num_2: raise Exception()
season = str(item['seasons'][-1]['number'])
season = season.encode('utf-8')
episode = str(item['seasons'][-1]['episodes'][-1]['number'])
episode = episode.encode('utf-8')
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
if int(year) > int(self.datetime.strftime('%Y')): raise Exception()
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
imdb = imdb.encode('utf-8')
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
items.append({'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'snum': season, 'enum': episode})
except:
pass
try:
result = trakt.getTrakt(self.hiddenprogress_link)
result = json.loads(result)
result = [str(i['show']['ids']['tvdb']) for i in result]
items = [i for i in items if not i['tvdb'] in result]
except:
pass
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['snum'] == i['snum'] and x['enum'] == i['enum']][0]
item['action'] = 'episodes'
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [x for x in result if '<EpisodeNumber>' in x]
item2 = result[0]
num = [x for x,y in enumerate(item) if re.compile('<SeasonNumber>(.+?)</SeasonNumber>').findall(y)[0] == str(i['snum']) and re.compile('<EpisodeNumber>(.+?)</EpisodeNumber>').findall(y)[0] == str(i['enum'])][-1]
item = [y for x,y in enumerate(item) if x > num][0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'snum': i['snum'], 'enum': i['enum'], 'action': 'episodes'})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
try: self.list = sorted(self.list, key=lambda k: k['premiered'], reverse=True)
except: pass
return self.list
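    # The method above keeps only shows with unwatched episodes, remembers the last
    # watched season/episode as 'snum'/'enum', drops shows hidden from progress, and
    # then resolves the *next* episode for up to 100 shows concurrently (one
    # workers.Thread per show) before sorting the result newest-first by 'premiered'.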
def trakt_episodes_list(self, url, user, lang):
items = self.trakt_list(url, user)
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['season'] == i['season'] and x['episode'] == i['episode']][0]
if item['poster'] == '0': raise Exception()
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [(re.findall('<SeasonNumber>%01d</SeasonNumber>' % int(i['season']), x), re.findall('<EpisodeNumber>%01d</EpisodeNumber>' % int(i['episode']), x), x) for x in result]
item = [x[2] for x in item if len(x[0]) > 0 and len(x[1]) > 0][0]
item2 = result[0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
return self.list
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def tvmaze_list(self, url, limit):
try:
result = client.request(url)
itemlist = []
items = json.loads(result)
except:
return
for item in items:
try:
if not 'english' in item['show']['language'].lower(): raise Exception()
if limit == True and not 'scripted' in item['show']['type'].lower(): raise Exception()
title = item['name']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = item['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
season = season.encode('utf-8')
episode = item['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
episode = episode.encode('utf-8')
tvshowtitle = item['show']['name']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['premiered']
year = re.findall('(\d{4})', year)[0]
year = year.encode('utf-8')
imdb = item['show']['externals']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
tvdb = item['show']['externals']['thetvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
poster = '0'
try: poster = item['show']['image']['original']
except: poster = '0'
if poster == None or poster == '': poster = '0'
poster = poster.encode('utf-8')
try: thumb1 = item['show']['image']['original']
except: thumb1 = '0'
try: thumb2 = item['image']['original']
except: thumb2 = '0'
if thumb2 == None or thumb2 == '0': thumb = thumb1
else: thumb = thumb2
if thumb == None or thumb == '': thumb = '0'
thumb = thumb.encode('utf-8')
premiered = item['airdate']
try: premiered = re.findall('(\d{4}-\d{2}-\d{2})', premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try: studio = item['show']['network']['name']
except: studio = '0'
if studio == None: studio = '0'
studio = studio.encode('utf-8')
try: genre = item['show']['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = item['show']['runtime']
except: duration = '0'
if duration == None: duration = '0'
duration = str(duration)
duration = duration.encode('utf-8')
try: rating = item['show']['rating']['average']
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = str(rating)
rating = rating.encode('utf-8')
try: plot = item['show']['summary']
except: plot = '0'
if plot == None: plot = '0'
plot = re.sub('<.+?>|</.+?>|\n', '', plot)
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'thumb': thumb})
except:
pass
itemlist = itemlist[::-1]
return itemlist
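    # TVmaze schedule items parsed above look roughly like this (inferred from the
    # field accesses; illustrative only):
    #   {'name': 'Episode title', 'season': 1, 'number': 1, 'airdate': '2017-01-01',
    #    'image': {'original': ...},
    #    'show': {'name': ..., 'language': 'English', 'type': 'Scripted',
    #             'premiered': '2010-01-01', 'externals': {'imdb': 'tt...', 'thetvdb': ...},
    #             'network': {'name': ...}, 'genres': [...], 'runtime': 60,
    #             'rating': {'average': ...}, 'image': {'original': ...}, 'summary': ...}}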
def episodeDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getTVShowIndicators(refresh=True)
try: multi = [i['tvshowtitle'] for i in items]
except: multi = []
multi = len([x for y,x in enumerate(multi) if x not in multi[:y]])
multi = True if multi > 1 else False
try: sysaction = items[0]['action']
except: sysaction = ''
isFolder = False if not sysaction == 'episodes' else True
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
tvshowBrowserMenu = control.lang(32071).encode('utf-8')
for i in items:
try:
if not 'label' in i: i['label'] = i['title']
if i['label'] == '0':
label = '%sx%02d . %s %s' % (i['season'], int(i['episode']), 'Episode', i['episode'])
else:
label = '%sx%02d . %s' % (i['season'], int(i['episode']), i['label'])
if multi == True:
label = '%s - %s' % (i['tvshowtitle'], label)
imdb, tvdb, year, season, episode = i['imdb'], i['tvdb'], i['year'], i['season'], i['episode']
systitle = urllib.quote_plus(i['title'])
systvshowtitle = urllib.quote_plus(i['tvshowtitle'])
syspremiered = urllib.quote_plus(i['premiered'])
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'episode'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, systvshowtitle)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'year': re.findall('(\d{4})', i['premiered'])[0]})
except: pass
try: meta.update({'title': i['label']})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered)
if isFolder == True:
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s' % (sysaddon, systvshowtitle, year, imdb, tvdb, season, episode)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
if multi == True:
cm.append((tvshowBrowserMenu, 'Container.Update(%s?action=seasons&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s,return)' % (sysaddon, systvshowtitle, year, imdb, tvdb)))
try:
overlay = int(playcount.getEpisodeOverlay(indicators, imdb, tvdb, season, episode))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=6)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=7)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, systvshowtitle, tvdb)))
if isFolder == False:
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'poster' in i and not i['poster'] == '0':
art.update({'poster': i['poster'], 'tvshow.poster': i['poster'], 'season.poster': i['poster']})
else:
art.update({'poster': addonPoster})
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'icon': i['fanart'], 'thumb': i['fanart']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster']})
else:
art.update({'icon': addonFanart, 'thumb': addonFanart})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
except:
pass
control.content(syshandle, 'episodes')
control.directory(syshandle, cacheToDisc=True)
views.setView('episodes', {'skin.estuary': 55, 'skin.confluence': 504})
def addDirectory(self, items, queue=False):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'addons')
control.directory(syshandle, cacheToDisc=True)
|
gpl-3.0
| -4,454,347,603,305,280,000
| 41.526453
| 502
| 0.504408
| false
| 3.897516
| false
| false
| false
|
dakrauth/strutil
|
setup.py
|
1
|
1169
|
#!/usr/bin/env python
import os, sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit(0)
with open('README.rst', 'r') as f:
long_description = f.read()
# Dynamically calculate the version based on strutil.__version__.
version = __import__('strutil').__version__
setup(
name='strutil',
url='https://github.com/dakrauth/strutil',
author='David A Krauth',
author_email='dakrauth@gmail.com',
description='Simple tools for downloading, cleaning, extracting and parsing content',
version=version,
long_description=long_description,
platforms=['any'],
license='MIT License',
py_modules=['strutil'],
classifiers=(
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing'
),
)
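# Typical local workflow for this file (a sketch, not part of the packaging metadata):
#   python setup.py sdist      # build a source distribution
#   python setup.py publish    # shortcut defined above: sdist upload, then exit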
|
mit
| -4,161,712,837,771,272,700
| 30.594595
| 89
| 0.63302
| false
| 3.962712
| false
| false
| false
|
gabstopper/smc-python
|
smc/elements/profiles.py
|
1
|
9278
|
"""
Profiles are templates used in other parts of the system to provide default
functionality for specific feature sets. For example, to enable DNS Relay on
an engine you must specify a DNSRelayProfile to use which defines the common
settings (or sub-settings) for that feature.
A DNS Relay Profile allows multiple DNS related mappings that can be configured.
Example usage::
>>> from smc.elements.profiles import DNSRelayProfile
>>> profile = DNSRelayProfile('mynewprofile')
.. note:: If the DNSRelayProfile does not exist, it will automatically be
created when a DNS relay rule is added to the DNSRelayProfile instance.
Add a fixed domain answer rule::
>>> profile.fixed_domain_answer.add([('microsoft3.com', 'foo.com'), ('microsoft4.com',)])
>>> profile.fixed_domain_answer.all()
[{u'domain_name': u'microsoft3.com', u'translated_domain_name': u'foo.com'}, {u'domain_name': u'microsoft4.com'}]
Translate hostnames (not fqdn) to a specific IP address::
>>> profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.12')])
>>> profile.hostname_mapping.all()
[{u'hostnames': u'hostname1,hostname2', u'ipaddress': u'1.1.1.12'}]
Translate an IP address to another::
>>> profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
>>> profile.dns_answer_translation.all()
[{u'translated_ipaddress': u'172.18.1.20', u'original_ipaddress': u'12.12.12.12'}]
Specify a DNS server to handle specific domains::
>>> profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
>>> profile.domain_specific_dns_server.all()
[{u'dns_server_addresses': u'172.18.1.20', u'domain_name': u'myfoo.com'}]
"""
from smc.base.model import Element, ElementCreator
from smc.api.exceptions import ElementNotFound
from smc.base.util import element_resolver
class DNSRule(object):
"""
DNSRule is the parent class for all DNS relay rules.
"""
__slots__ = ('profile')
def __init__(self, profile):
self.profile = profile
def add(self, instance, answers):
key, left, right = instance._attr
json = [dict(zip([left, right], d))
for d in answers]
try:
self.profile.data[key].extend(json)
self.profile.update()
except ElementNotFound:
j = {'name': self.profile.name,
key: json}
return ElementCreator(self.profile.__class__, j)
def all(self):
"""
Return all entries
:rtype: list(dict)
"""
attribute = self._attr[0]
return self.profile.data.get(attribute, [])
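# Illustration of how DNSRule.add uses the subclass _attr triple (values hypothetical):
# with _attr = ('fixed_domain_answer', 'domain_name', 'translated_domain_name'),
# add([('a.com', 'b.com'), ('c.com',)]) builds
#   [{'domain_name': 'a.com', 'translated_domain_name': 'b.com'}, {'domain_name': 'c.com'}]
# and either extends that key on the existing profile or creates the profile with it.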
class FixedDomainAnswer(DNSRule):
"""
Direct requests for specific domains to IPv4 addresses, IPv6
addresses, fully qualified domain names (FQDNs), or empty DNS replies
"""
_attr = ('fixed_domain_answer', 'domain_name', 'translated_domain_name')
def add(self, answers):
"""
        Add a fixed domain answer. Provide a list of two-tuples where
        the first entry is the domain name and the second is the
        translated domain value::
profile = DNSRelayProfile('dnsrules')
profile.fixed_domain_answer.add([
('microsoft.com', 'foo.com'), ('microsoft2.com',)])
:param answers: (domain_name, translated_domain_name)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
        .. note:: translated_domain_name can be None, which will cause
the NGFW to return NXDomain for the specified domain.
"""
super(FixedDomainAnswer, self).add(self, answers)
class HostnameMapping(DNSRule):
"""
Statically map host names, aliases for host names, and unqualified
names (a host name without the domain suffix) to IPv4 or IPv6
addresses
"""
_attr = ('hostname_mapping', 'hostnames', 'ipaddress')
def add(self, answers):
"""
Map specific hostname to specified IP address. Provide a list
of two-tuples. The first entry is the hostname/s to translate
(you can provide multiple comma separated values). The second
entry should be the IP address to map the hostnames to::
profile = DNSRelayProfile('dnsrules')
profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.1')])
:param answers: (hostnames, ipaddress), hostnames can be a
comma separated list.
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(HostnameMapping, self).add(self, answers)
class DomainSpecificDNSServer(DNSRule):
"""
Forward DNS requests to different DNS servers based on
the requested domain.
"""
_attr = ('domain_specific_dns_server', 'domain_name', 'dns_server_addresses')
def add(self, answers):
"""
Relay specific domains to a specified DNS server. Provide
a list of two-tuple with first entry the domain name to relay
for. The second entry is the DNS server that should handle the
query::
profile = DNSRelayProfile('dnsrules')
profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
:param answers: (domain_name, dns_server_addresses), dns server
addresses can be a comma separated string
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(DomainSpecificDNSServer, self).add(self, answers)
class DNSAnswerTranslation(DNSRule):
"""
Map IPv4 addresses resolved by external DNS servers to IPv4
addresses in the internal network.
"""
_attr = ('dns_answer_translation', 'original_ipaddress', 'translated_ipaddress')
def add(self, answers):
"""
Takes an IPv4 address and translates to a specified IPv4 value.
Provide a list of two-tuple with the first entry providing the
original address and second entry specifying the translated address::
profile = DNSRelayProfile('dnsrules')
profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
:param answers: (original_ipaddress, translated_ipaddress)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(DNSAnswerTranslation, self).add(self, answers)
class DNSRelayProfile(Element):
"""
DNS Relay Settings specify a profile to handle how the engine will
interpret DNS queries. Stonesoft can act as a DNS relay, rewrite
DNS queries or redirect domains to the specified DNS servers.
"""
typeof = 'dns_relay_profile'
@property
def fixed_domain_answer(self):
"""
Add a fixed domain answer entry.
:rtype: FixedDomainAnswer
"""
return FixedDomainAnswer(self)
@property
def hostname_mapping(self):
"""
Add a hostname to IP mapping
:rtype: HostnameMapping
"""
return HostnameMapping(self)
@property
def domain_specific_dns_server(self):
"""
Add domain to DNS server mapping
:rtype: DomainSpecificDNSServer
"""
return DomainSpecificDNSServer(self)
@property
def dns_answer_translation(self):
"""
Add a DNS answer translation
:rtype: DNSAnswerTranslation
"""
return DNSAnswerTranslation(self)
class SNMPAgent(Element):
"""
Minimal implementation of SNMPAgent
"""
typeof = 'snmp_agent'
@classmethod
def create(cls, name, snmp_monitoring_contact=None,
snmp_monitoring_listening_port=161, snmp_version='v3',
comment=None):
json = {'boot': False,
'go_offline': False,
'go_online': False,
'hardware_alerts': False,
'name': name,
'policy_applied': False,
'shutdown': False,
'snmp_monitoring_contact': snmp_monitoring_contact,
'snmp_monitoring_listening_port': snmp_monitoring_listening_port,
'snmp_monitoring_user_name': [],
'snmp_trap_destination': [],
'snmp_user_name': [],
'snmp_version': snmp_version,
'user_login': False}
return ElementCreator(cls, json)
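    # Usage sketch (assumes an authenticated SMC session; the element name and contact
    # below are hypothetical). Note that the 'comment' argument is accepted but not
    # included in the payload built above.
    #   SNMPAgent.create('snmp-agent-1',
    #                    snmp_monitoring_contact='noc@example.com',
    #                    snmp_monitoring_listening_port=161,
    #                    snmp_version='v3')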
class SandboxService(Element):
typeof = 'sandbox_service'
@classmethod
def create(cls, name, sandbox_data_center, portal_username=None, comment=None):
"""
Create a Sandbox Service element
"""
json = {
'name': name,
'sandbox_data_center': element_resolver(sandbox_data_center),
'portal_username': portal_username if portal_username else '',
'comment': comment}
return ElementCreator(cls, json)
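    # Usage sketch (names are hypothetical; sandbox_data_center may be an element or an
    # href, since it is passed through element_resolver):
    #   SandboxService.create('sandbox-svc-1',
    #                         sandbox_data_center=SandboxDataCenter('dc-emea'),
    #                         portal_username='analyst1')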
class SandboxDataCenter(Element):
typeof = 'sandbox_data_center'
|
apache-2.0
| -6,040,503,220,421,455,000
| 32.861314
| 117
| 0.613171
| false
| 4.223031
| false
| false
| false
|
unicef/rhizome
|
rhizome/api/resources/source_submission.py
|
1
|
1345
|
from rhizome.api.resources.base_model import BaseModelResource
from rhizome.models.document_models import SourceSubmission
class SourceSubmissionResource(BaseModelResource):
'''
**GET Request** Returns all SourceSubmissions unless an optional parameter is specified
- *Optional Parameters:*
'document_id': return only the source submissions with the specified document ids
- *Errors:*
if an incorrect document id is provided, returns an empty object list
'''
class Meta(BaseModelResource.Meta):
resource_name = 'source_submission'
object_class = SourceSubmission
# GET_params_required = ['document_id']
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
        ## fix this in the front end to request the resource in REST style ##
id_param = request.GET.get('id', None)
if id_param:
return self.get_object_list(request).filter(**{'id': id_param})
doc_filter = {'document_id': request.GET.get('document_id')}
return self.get_object_list(request).filter(**doc_filter)
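        # Illustrative requests served by this resource (the /api/v1/ prefix is an
        # assumption; the actual mount point depends on the project's URL configuration):
        #   GET /api/v1/source_submission/?id=10          -> the submission with id 10
        #   GET /api/v1/source_submission/?document_id=42 -> submissions for document 42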
|
agpl-3.0
| 15,860,401,860,111,412
| 39.757576
| 93
| 0.657993
| false
| 4.453642
| false
| false
| false
|
Eksmo/calibre
|
src/calibre/gui2/preferences/search_ui.py
|
1
|
13356
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/preferences/search.ui'
#
# Created: Thu Jul 19 23:32:29 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(670, 663)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.opt_search_as_you_type = QtGui.QCheckBox(Form)
self.opt_search_as_you_type.setObjectName(_fromUtf8("opt_search_as_you_type"))
self.gridLayout.addWidget(self.opt_search_as_you_type, 0, 0, 1, 1)
self.opt_use_primary_find_in_search = QtGui.QCheckBox(Form)
self.opt_use_primary_find_in_search.setObjectName(_fromUtf8("opt_use_primary_find_in_search"))
self.gridLayout.addWidget(self.opt_use_primary_find_in_search, 0, 1, 1, 1)
self.opt_highlight_search_matches = QtGui.QCheckBox(Form)
self.opt_highlight_search_matches.setObjectName(_fromUtf8("opt_highlight_search_matches"))
self.gridLayout.addWidget(self.opt_highlight_search_matches, 1, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 2)
self.opt_limit_search_columns = QtGui.QCheckBox(self.groupBox)
self.opt_limit_search_columns.setObjectName(_fromUtf8("opt_limit_search_columns"))
self.gridLayout_2.addWidget(self.opt_limit_search_columns, 1, 0, 1, 2)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 2, 0, 1, 1)
self.opt_limit_search_columns_to = EditWithComplete(self.groupBox)
self.opt_limit_search_columns_to.setObjectName(_fromUtf8("opt_limit_search_columns_to"))
self.gridLayout_2.addWidget(self.opt_limit_search_columns_to, 2, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setWordWrap(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 5, 0, 1, 2)
self.gridLayout.addWidget(self.groupBox, 4, 0, 1, 2)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.l12 = QtGui.QHBoxLayout()
self.l12.setObjectName(_fromUtf8("l12"))
self.la10 = QtGui.QLabel(self.groupBox_2)
self.la10.setObjectName(_fromUtf8("la10"))
self.l12.addWidget(self.la10)
self.gst_names = QtGui.QComboBox(self.groupBox_2)
self.gst_names.setEditable(True)
self.gst_names.setMinimumContentsLength(10)
self.gst_names.setObjectName(_fromUtf8("gst_names"))
self.l12.addWidget(self.gst_names)
self.gst_delete_button = QtGui.QToolButton(self.groupBox_2)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("trash.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gst_delete_button.setIcon(icon)
self.gst_delete_button.setObjectName(_fromUtf8("gst_delete_button"))
self.l12.addWidget(self.gst_delete_button)
self.gst_value = EditWithComplete(self.groupBox_2)
self.gst_value.setObjectName(_fromUtf8("gst_value"))
self.l12.addWidget(self.gst_value)
self.gst_save_button = QtGui.QToolButton(self.groupBox_2)
self.gst_save_button.setObjectName(_fromUtf8("gst_save_button"))
self.l12.addWidget(self.gst_save_button)
self.gridLayout_3.addLayout(self.l12, 0, 0, 1, 1)
self.gst_explanation = QtGui.QTextBrowser(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.gst_explanation.sizePolicy().hasHeightForWidth())
self.gst_explanation.setSizePolicy(sizePolicy)
self.gst_explanation.setObjectName(_fromUtf8("gst_explanation"))
self.gridLayout_3.addWidget(self.gst_explanation, 0, 1, 3, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.l11 = QtGui.QLabel(self.groupBox_2)
self.l11.setObjectName(_fromUtf8("l11"))
self.hboxlayout.addWidget(self.l11)
self.opt_grouped_search_make_user_categories = EditWithComplete(self.groupBox_2)
self.opt_grouped_search_make_user_categories.setObjectName(_fromUtf8("opt_grouped_search_make_user_categories"))
self.hboxlayout.addWidget(self.opt_grouped_search_make_user_categories)
self.gridLayout_3.addLayout(self.hboxlayout, 1, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 2, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_2, 6, 0, 1, 2)
self.clear_history_button = QtGui.QPushButton(Form)
self.clear_history_button.setObjectName(_fromUtf8("clear_history_button"))
self.gridLayout.addWidget(self.clear_history_button, 5, 0, 1, 2)
self.groupBox22 = QtGui.QGroupBox(Form)
self.groupBox22.setObjectName(_fromUtf8("groupBox22"))
self.gridLayout_22 = QtGui.QGridLayout(self.groupBox22)
self.gridLayout_22.setObjectName(_fromUtf8("gridLayout_22"))
self.label1 = QtGui.QLabel(self.groupBox22)
self.label1.setWordWrap(True)
self.label1.setObjectName(_fromUtf8("label1"))
self.gridLayout_22.addWidget(self.label1, 0, 0, 1, 6)
self.label_221 = QtGui.QLabel(self.groupBox22)
self.label_221.setObjectName(_fromUtf8("label_221"))
self.gridLayout_22.addWidget(self.label_221, 1, 0, 1, 1)
self.similar_authors_search_key = QtGui.QComboBox(self.groupBox22)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.similar_authors_search_key.sizePolicy().hasHeightForWidth())
self.similar_authors_search_key.setSizePolicy(sizePolicy)
self.similar_authors_search_key.setObjectName(_fromUtf8("similar_authors_search_key"))
self.gridLayout_22.addWidget(self.similar_authors_search_key, 1, 1, 1, 1)
self.opt_similar_authors_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_authors_match_kind.setObjectName(_fromUtf8("opt_similar_authors_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_authors_match_kind, 1, 2, 1, 1)
self.label_222 = QtGui.QLabel(self.groupBox22)
self.label_222.setObjectName(_fromUtf8("label_222"))
self.gridLayout_22.addWidget(self.label_222, 1, 3, 1, 1)
self.similar_series_search_key = QtGui.QComboBox(self.groupBox22)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.similar_series_search_key.sizePolicy().hasHeightForWidth())
self.similar_series_search_key.setSizePolicy(sizePolicy)
self.similar_series_search_key.setObjectName(_fromUtf8("similar_series_search_key"))
self.gridLayout_22.addWidget(self.similar_series_search_key, 1, 4, 1, 1)
self.opt_similar_series_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_series_match_kind.setObjectName(_fromUtf8("opt_similar_series_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_series_match_kind, 1, 5, 1, 1)
self.label_223 = QtGui.QLabel(self.groupBox22)
self.label_223.setObjectName(_fromUtf8("label_223"))
self.gridLayout_22.addWidget(self.label_223, 2, 0, 1, 1)
self.similar_tags_search_key = QtGui.QComboBox(self.groupBox22)
self.similar_tags_search_key.setObjectName(_fromUtf8("similar_tags_search_key"))
self.gridLayout_22.addWidget(self.similar_tags_search_key, 2, 1, 1, 1)
self.opt_similar_tags_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_tags_match_kind.setObjectName(_fromUtf8("opt_similar_tags_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_tags_match_kind, 2, 2, 1, 1)
self.label_224 = QtGui.QLabel(self.groupBox22)
self.label_224.setObjectName(_fromUtf8("label_224"))
self.gridLayout_22.addWidget(self.label_224, 2, 3, 1, 1)
self.similar_publisher_search_key = QtGui.QComboBox(self.groupBox22)
self.similar_publisher_search_key.setObjectName(_fromUtf8("similar_publisher_search_key"))
self.gridLayout_22.addWidget(self.similar_publisher_search_key, 2, 4, 1, 1)
self.opt_similar_publisher_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_publisher_match_kind.setObjectName(_fromUtf8("opt_similar_publisher_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_publisher_match_kind, 2, 5, 1, 1)
self.gridLayout.addWidget(self.groupBox22, 7, 0, 1, 2)
self.label_2.setBuddy(self.opt_limit_search_columns_to)
self.la10.setBuddy(self.gst_names)
self.l11.setBuddy(self.opt_grouped_search_make_user_categories)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_("Form"))
self.opt_search_as_you_type.setText(_("Search as you &type"))
self.opt_use_primary_find_in_search.setText(_("Unaccented characters match accented characters"))
self.opt_highlight_search_matches.setText(_("&Highlight search results instead of restricting the book list to the results"))
self.groupBox.setTitle(_("What to search by default"))
self.label.setText(_("When you enter a search term without a prefix, by default calibre will search all metadata for matches. For example, entering, \"asimov\" will search not just authors but title/tags/series/comments/etc. Use these options if you would like to change this behavior."))
self.opt_limit_search_columns.setText(_("&Limit the searched metadata"))
self.label_2.setText(_("&Columns that non-prefixed searches are limited to:"))
self.label_3.setText(_("Note that this option affects all searches, including saved searches and restrictions. Therefore, if you use this option, it is best to ensure that you always use prefixes in your saved searches. For example, use \"series:Foundation\" rather than just \"Foundation\" in a saved search"))
self.groupBox_2.setTitle(_("Grouped Search Terms"))
self.la10.setText(_("&Names:"))
self.gst_names.setToolTip(_("Contains the names of the currently-defined group search terms.\n"
"Create a new name by entering it into the empty box, then\n"
"pressing Save. Rename a search term by selecting it then\n"
"changing the name and pressing Save. Change the value of\n"
"a search term by changing the value box then pressing Save."))
self.gst_delete_button.setToolTip(_("Delete the current search term"))
self.gst_delete_button.setText(_("..."))
self.gst_save_button.setToolTip(_("Save the current search term. You can rename a search term by\n"
"changing the name then pressing Save. You can change the value\n"
"of a search term by changing the value box then pressing Save."))
self.gst_save_button.setText(_("&Save"))
self.l11.setText(_("Make &user categories from:"))
self.opt_grouped_search_make_user_categories.setToolTip(_("Enter the names of any grouped search terms you wish\n"
"to be shown as user categories"))
self.clear_history_button.setToolTip(_("Clear search histories from all over calibre. Including the book list, e-book viewer, fetch news dialog, etc."))
self.clear_history_button.setText(_("Clear search &histories"))
self.groupBox22.setTitle(_("What to search when searching similar books"))
self.label1.setText(_("<p>When you search for similar books by right clicking the\n"
" book and selecting \"Similar books...\",\n"
" calibre constructs a search using the column lookup names specified below.\n"
" By changing the lookup name to a grouped search term you can\n"
" search multiple columns at once.</p>"))
self.label_221.setText(_("Similar authors: "))
self.label_222.setText(_("Similar series: "))
self.label_223.setText(_("Similar tags: "))
self.label_224.setText(_("Similar publishers: "))
from calibre.gui2.complete2 import EditWithComplete
|
gpl-3.0
| 2,683,885,192,424,116,700
| 63.834951
| 320
| 0.699386
| false
| 3.60973
| false
| false
| false
|
RobRuana/sideboard
|
sideboard/internal/logging.py
|
1
|
1819
|
from __future__ import unicode_literals, absolute_import
import os
import logging.config
import logging_unterpolation
from sideboard.config import config
class IndentMultilinesLogFormatter(logging.Formatter):
"""
Provide a formatter (unused by default) which adds indentation to messages
which are split across multiple lines.
"""
def format(self, record):
s = super(IndentMultilinesLogFormatter, self).format(record)
        # indent continuation lines so external log parsers can keep a multi-line message together as one record
s = s.rstrip('\n').replace('\n', '\n ')
return s
def _configure_logging():
logging_unterpolation.patch_logging()
fname = '/etc/sideboard/logging.cfg'
if os.path.exists(fname):
logging.config.fileConfig(fname, disable_existing_loggers=True)
else:
# ConfigObj doesn't support interpolation escaping, so we manually work around it here
formatters = config['formatters'].dict()
for formatter in formatters.values():
formatter['format'] = formatter['format'].replace('$$', '%')
formatter['datefmt'] = formatter['datefmt'].replace('$$', '%') or None
formatters['indent_multiline'] = {
'()': IndentMultilinesLogFormatter,
'format': formatters['default']['format']
}
logging.config.dictConfig({
'version': 1,
'root': {
'level': config['loggers']['root'],
'handlers': config['handlers'].dict().keys()
},
'loggers': {
name: {'level': level}
for name, level in config['loggers'].items() if name != 'root'
},
'handlers': config['handlers'].dict(),
'formatters': formatters
})
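# A minimal sketch (not part of sideboard) showing what IndentMultilinesLogFormatter
# does to a multi-line record; the handler, format string, and logger name are
# assumptions made purely for illustration.
def _demo_indent_formatter():
    handler = logging.StreamHandler()
    handler.setFormatter(IndentMultilinesLogFormatter('%(levelname)s %(message)s'))
    demo_log = logging.getLogger('sideboard.demo')
    demo_log.addHandler(handler)
    # the continuation line is printed indented, so an external log parser can
    # keep both lines together as one record
    demo_log.error('first line\nsecond line')
if __name__ == '__main__':  # ad-hoc check; the module normally has no entry point
    _demo_indent_formatter()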
|
bsd-3-clause
| -6,979,212,933,264,625,000
| 35.38
| 106
| 0.59978
| false
| 4.558897
| true
| false
| false
|
jolyonb/edx-platform
|
openedx/core/djangoapps/user_api/accounts/tests/test_image_helpers.py
|
1
|
2916
|
"""
Tests for helpers.py
"""
from __future__ import absolute_import
import datetime
import hashlib
from django.test import TestCase
from mock import patch
from pytz import UTC
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
from ..image_helpers import get_profile_image_urls_for_user
TEST_SIZES = {'full': 50, 'small': 10}
TEST_PROFILE_IMAGE_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=UTC)
@patch.dict('django.conf.settings.PROFILE_IMAGE_SIZES_MAP', TEST_SIZES, clear=True)
@skip_unless_lms
class ProfileImageUrlTestCase(TestCase):
"""
Tests for profile image URL generation helpers.
"""
def setUp(self):
super(ProfileImageUrlTestCase, self).setUp()
self.user = UserFactory()
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
def verify_url(self, actual_url, expected_name, expected_pixels, expected_version):
"""
Verify correct url structure.
"""
self.assertEqual(
actual_url,
'http://example-storage.com/profile-images/{name}_{size}.jpg?v={version}'.format(
name=expected_name, size=expected_pixels, version=expected_version
)
)
def verify_default_url(self, actual_url, expected_pixels):
"""
Verify correct url structure for a default profile image.
"""
self.assertEqual(
actual_url,
'/static/default_{size}.png'.format(size=expected_pixels)
)
def verify_urls(self, actual_urls, expected_name, is_default=False):
"""
Verify correct url dictionary structure.
"""
self.assertEqual(set(TEST_SIZES.keys()), set(actual_urls.keys()))
for size_display_name, url in actual_urls.items():
if is_default:
self.verify_default_url(url, TEST_SIZES[size_display_name])
else:
self.verify_url(
url, expected_name, TEST_SIZES[size_display_name], TEST_PROFILE_IMAGE_UPLOAD_DT.strftime("%s")
)
def test_get_profile_image_urls(self):
"""
Tests `get_profile_image_urls_for_user`
"""
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
expected_name = hashlib.md5('secret' + self.user.username).hexdigest()
actual_urls = get_profile_image_urls_for_user(self.user)
self.verify_urls(actual_urls, expected_name, is_default=False)
self.user.profile.profile_image_uploaded_at = None
self.user.profile.save()
self.verify_urls(get_profile_image_urls_for_user(self.user), 'default', is_default=True)
|
agpl-3.0
| -586,315,608,555,252,000
| 34.560976
| 114
| 0.641632
| false
| 3.762581
| true
| false
| false
|
MarkusHackspacher/unknown-horizons
|
horizons/world/buildability/potentialroadconnectivitycache.py
|
1
|
3725
|
# ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.ai.aiplayer.constants import BUILDING_PURPOSE
from horizons.world.buildability.connectedareacache import ConnectedAreaCache
class PotentialRoadConnectivityCache:
"""
	Query whether a road connection between two sets of coordinates is possible.
	This class is used by the AI to figure out whether it might be possible to build a
	road between two sets of coordinates. The connection is only potential because the
	area may contain tiles that are part of the AI's plan, and the land covered by that
	plan may be owned by the AI or not yet owned by anyone.
"""
def __init__(self, area_builder):
self._area_builder = area_builder
self._land_manager = area_builder.land_manager
self._settlement_ground_map = area_builder.settlement.ground_map
self._cache = ConnectedAreaCache()
self.area_numbers = self._cache.area_numbers # {(x, y): area id, ...}
def modify_area(self, coords_list):
"""
Refresh the usability of the coordinates in the given list.
This function is called with a list of coordinates on which the possibility of
building a road may have changed. It figures out whether it is possible to build
a road on (x, y) and updates the underlying ConnectedAreaCache accordingly.
"""
add_list = []
remove_list = []
for coords in coords_list:
if coords not in self._settlement_ground_map:
if coords in self.area_numbers:
remove_list.append(coords)
elif coords in self._land_manager.coastline:
if coords in self.area_numbers:
remove_list.append(coords)
elif coords in self._land_manager.roads:
if coords not in self.area_numbers:
add_list.append(coords)
elif coords in self._area_builder.plan:
if self._area_builder.plan[coords][0] == BUILDING_PURPOSE.NONE:
if coords not in self.area_numbers:
add_list.append(coords)
else:
assert self._area_builder.plan[coords][0] != BUILDING_PURPOSE.ROAD
if coords in self.area_numbers:
remove_list.append(coords)
else:
if coords in self.area_numbers:
remove_list.append(coords)
if add_list:
self._cache.add_area(add_list)
if remove_list:
self._cache.remove_area(remove_list)
def is_connection_possible(self, coords_set1, coords_set2):
"""Return True if and only if it is possible to connect the two coordinate sets.
		More specifically, it returns True if and only if it is possible to build a road
from some (x1, y1) in coords_set1 to some (x2, y2) in coords_set2 entirely within
the area. This is done cheaply using the underlying ConnectedAreaCache.
"""
areas1 = set()
for coords in coords_set1:
if coords in self.area_numbers:
areas1.add(self.area_numbers[coords])
for coords in coords_set2:
if coords in self.area_numbers:
if self.area_numbers[coords] in areas1:
return True
return False
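# Rough usage sketch, kept as a comment because a real ``area_builder`` comes
# from the AI player's settlement code and cannot be built in isolation:
#
#     cache = PotentialRoadConnectivityCache(area_builder)
#     cache.modify_area([(3, 4), (3, 5)])               # coords whose usability changed
#     cache.is_connection_possible({(3, 4)}, {(9, 2)})  # True if a road could link them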
|
gpl-2.0
| -8,918,137,794,307,005,000
| 37.010204
| 86
| 0.713826
| false
| 3.527462
| false
| false
| false
|
beeftornado/sentry
|
src/sentry/integrations/msteams/utils.py
|
1
|
3654
|
from __future__ import absolute_import
import six
import logging
import enum
from django.http import Http404
from sentry.models import (
Integration,
Organization,
IdentityProvider,
)
from sentry.shared_integrations.exceptions import ApiError
from sentry.utils.compat import filter
from .client import MsTeamsClient, MsTeamsPreInstallClient, get_token_data
MSTEAMS_MAX_ITERS = 100
logger = logging.getLogger("sentry.integrations.msteams")
# MS Teams will convert integers into strings in value inputs sent in adaptive
# cards, so we may as well just do that here first.
class ACTION_TYPE(six.text_type, enum.Enum):
RESOLVE = "1"
IGNORE = "2"
ASSIGN = "3"
UNRESOLVE = "4"
UNASSIGN = "5"
def channel_filter(channel, name):
# the general channel has no name in the list
# retrieved from the REST API call
if channel.get("name"):
return name.lower() == channel.get("name").lower()
else:
return name.lower() == "general"
def get_channel_id(organization, integration_id, name):
try:
integration = Integration.objects.get(
provider="msteams", organizations=organization, id=integration_id
)
except Integration.DoesNotExist:
return None
team_id = integration.external_id
client = MsTeamsClient(integration)
# handle searching for channels first
channel_list = client.get_channel_list(team_id)
filtered_channels = list(filter(lambda x: channel_filter(x, name), channel_list))
if len(filtered_channels) > 0:
return filtered_channels[0].get("id")
# handle searching for users
members = client.get_member_list(team_id, None)
for i in range(MSTEAMS_MAX_ITERS):
member_list = members.get("members")
continuation_token = members.get("continuationToken")
filtered_members = list(
filter(lambda x: x.get("name").lower() == name.lower(), member_list)
)
if len(filtered_members) > 0:
# TODO: handle duplicate username case
user_id = filtered_members[0].get("id")
tenant_id = filtered_members[0].get("tenantId")
return client.get_user_conversation_id(user_id, tenant_id)
if not continuation_token:
return None
members = client.get_member_list(team_id, continuation_token)
return None
def send_incident_alert_notification(action, incident, metric_value):
from .card_builder import build_incident_attachment
channel = action.target_identifier
integration = action.integration
attachment = build_incident_attachment(incident, metric_value)
client = MsTeamsClient(integration)
try:
client.send_card(channel, attachment)
except ApiError as e:
logger.info("rule.fail.msteams_post", extra={"error": six.text_type(e)})
def get_identity(user, organization_id, integration_id):
try:
organization = Organization.objects.get(id__in=user.get_orgs(), id=organization_id)
except Organization.DoesNotExist:
raise Http404
try:
integration = Integration.objects.get(id=integration_id, organizations=organization)
except Integration.DoesNotExist:
raise Http404
try:
idp = IdentityProvider.objects.get(external_id=integration.external_id, type="msteams")
except IdentityProvider.DoesNotExist:
raise Http404
return organization, integration, idp
def get_preinstall_client(service_url):
# may want try/catch here since this makes an external API call
access_token = get_token_data()["access_token"]
return MsTeamsPreInstallClient(access_token, service_url)
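# Rough usage sketch (comment only, since it needs real Django models and an
# installed MS Teams integration; the names are placeholders): resolve where an
# alert should go before sending a card.
#
#     channel_id = get_channel_id(organization, integration_id, "general")
#     # returns a channel id, a user conversation id, or None when nothing matches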
|
bsd-3-clause
| 6,982,754,459,844,222,000
| 29.966102
| 95
| 0.688013
| false
| 3.883103
| false
| false
| false
|
shnergle/ShnergleServer
|
api/util.py
|
1
|
5137
|
import calendar
import datetime
import functools
import json
import os
import time
import cherrypy
import pyodbc
def connect(thread_index):
cherrypy.thread_data.db = pyodbc.connect(os.environ['DATABASE'])
current_dir = os.path.dirname(os.path.abspath(__file__))
cherrypy.thread_data.placeholder_image = open(os.path.join(current_dir, 'placeholder.png'), 'rb').read()
def dont_cache():
cherrypy.response.headers['Expires'] = datetime.datetime.utcnow().strftime(
'%a, %d %b %Y %H:%M:%S GMT')
cherrypy.response.headers['Cache-Control'] = ('no-store, '
'no-cache, '
'must-revalidate, '
'post-check=0, '
'pre-check=0')
def protect(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if kwargs.pop('app_secret', False) != os.environ['APP_SECRET']:
raise cherrypy.HTTPError(403)
return func(*args, **kwargs)
return decorator
def auth(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if not kwargs.get('facebook_id', False):
raise cherrypy.HTTPError(403)
cursor = kwargs['cursor']
qry = {'select': 'id',
'table': 'users',
'where': 'facebook_id = ?',
'order_by': 'id',
'limit': 1}
cursor.execute(query(**qry), (kwargs['facebook_id'],))
res = cursor.fetchone().id
if not res:
raise cherrypy.HTTPError(403)
kwargs.update(user_id=res)
return func(*args, **kwargs)
return decorator
def jsonp(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
dont_cache()
res = json.dumps(func(*args, **kwargs), separators=(',', ':'),
default=lambda o: str(o))
callback = kwargs.pop('callback', False)
if callback:
cherrypy.response.headers['Content-Type'] = ('text/javascript; '
'charset=utf-8')
res = callback + '(' + res + ');'
else:
cherrypy.response.headers['Content-Type'] = 'application/json'
return res
return decorator
def db(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
cursor = cherrypy.thread_data.db.cursor()
kwargs.update(cursor=cursor)
try:
res = func(*args, **kwargs)
finally:
cursor.commit()
cursor.close()
return res
return decorator
def implode(glue, list):
return list if isinstance(list, str) else glue.join(list)
def query(select=None, table=None, left_join=None, on=None, where=None,
delete=None,
group_by=None, order_by=None, limit=None,
insert_into=None, columns=None,
update=None, set_values=None,
last_id=False):
if select:
qry = 'SELECT ' + implode(', ', select)
if table:
qry += ' FROM ' + implode(', ', table)
if left_join and on:
if isinstance(left_join, str):
left_join = [left_join]
if isinstance(on, str):
on = [on]
for j, o in zip(left_join, on):
qry += ' LEFT JOIN ' + j + ' ON ' + o
if where:
qry += ' WHERE ' + implode(' AND ', where)
if group_by:
qry += ' GROUP BY ' + implode(', ', group_by)
if order_by:
qry += ' ORDER BY ' + implode(', ', order_by)
if limit:
if isinstance(limit, str) or isinstance(limit, int):
qry += ' OFFSET 0 ROWS FETCH NEXT ' + str(limit) + ' ROWS ONLY'
else:
qry += (' OFFSET ' + str(int(limit[0])) + ' ROWS FETCH NEXT ' +
str(int(limit[1])) + ' ROWS ONLY')
elif delete:
qry = 'DELETE FROM ' + delete + ' WHERE ' + implode(' AND ', where)
elif insert_into:
qry = 'INSERT INTO ' + insert_into
if columns:
qry += (' (' + implode(', ', columns) + ')' + ' VALUES (' +
('?' + ', ?' * (len(columns) - 1)) + ')')
elif update:
qry = 'UPDATE ' + update
if set_values:
qry += ' SET ' + implode(' = ?, ', set_values) + ' = ?'
if where:
qry += ' WHERE ' + implode(' AND ', where)
elif last_id:
qry = 'SELECT @@Identity AS [identity]'
return qry
expose = cherrypy.expose
def to_int(value):
return int(value) if value else None
def to_bool(value):
if value is None:
return None
if not value:
return False
if value in ('none', 'false', 'no', 'off', '0'):
return False
return True
def to_float(value):
return float(value) if value else None
def row_to_dict(cursor, row):
return {t[0]: val for t, val in zip(cursor.description, row)}
def now():
return calendar.timegm(datetime.datetime.utcnow().utctimetuple())
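# Minimal sketch of the query() helper (not part of the original module); the
# table and column names are made up, and running it still requires the
# cherrypy/pyodbc imports at the top of the file to resolve.
if __name__ == '__main__':
    print(query(select=['id', 'name'], table='users',
                where=['facebook_id = ?'], order_by='id', limit=1))
    # SELECT id, name FROM users WHERE facebook_id = ? ORDER BY id
    # OFFSET 0 ROWS FETCH NEXT 1 ROWS ONLY
    print(query(insert_into='users', columns=['facebook_id', 'name']))
    # INSERT INTO users (facebook_id, name) VALUES (?, ?)
    print(query(update='users', set_values=['name'], where=['id = ?']))
    # UPDATE users SET name = ? WHERE id = ?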
|
mit
| 8,012,379,061,196,236,000
| 30.133333
| 108
| 0.512167
| false
| 3.819331
| false
| false
| false
|
WheatonCS/Lexos
|
lexos/receivers/rolling_window_receiver.py
|
1
|
7808
|
"""This is the receiver for rolling windows analysis model."""
import pandas as pd
from enum import Enum
from typing import NamedTuple, Optional, List
from lexos.receivers.base_receiver import BaseReceiver
from lexos.managers.utility import load_file_manager
class RWATokenType(Enum):
"""This type specify what kind of token (or term) to find in a window."""
string = "string"
regex = "regex"
word = "word"
class WindowUnitType(Enum):
"""This type specify what is the unit of each window.
Say it is letter, each window consist of `window_size` number of letters.
"""
letter = "letter"
word = "word"
line = "line"
class RWAWindowOptions(NamedTuple):
"""The options related to window creation."""
# The size of the window.
window_size: int
# The unit of the window, see WindowUnitType for more detail.
window_unit: WindowUnitType
class RWARatioTokenOptions(NamedTuple):
"""The option if you choose to count by ratio."""
# The type of the token, see RWATokenType for more detail.
token_type: RWATokenType
# The frame saves token count as list of numerator and token count as list
# of denominator.
token_frame: pd.DataFrame
class RWAAverageTokenOptions(NamedTuple):
"""The options if you choose to count by average."""
# The type of the token, see RWATokenType for more detail.
token_type: RWATokenType
# A list of tokens to count.
tokens: List[str]
class RWAPlotOptions(NamedTuple):
"""The option for adjusting plotly result."""
# Show individual points if true.
individual_points: bool
# Return plot in black-white scale if true.
black_white: bool
class RWAFrontEndOptions(NamedTuple):
"""All the options to get from the front end."""
# The options if you choose ratio count,
# it will be None if you did not choose ratio.
ratio_token_options: Optional[RWARatioTokenOptions]
# The option if you choose average count
# it will be None if you did not choose Average.
average_token_options: Optional[RWAAverageTokenOptions]
# The id of the passage to run rolling window.
passage_file_id: int
# The setting related to the windows.
window_options: RWAWindowOptions
# The settings related to the plot result.
plot_options: RWAPlotOptions
# A milestone, it is none if it is not given from frontend.
milestone: Optional[str]
# The color to use
text_color: str
class RollingWindowsReceiver(BaseReceiver):
"""Get all the options to generate rolling windows result."""
def _get_ratio_token_options(self) -> RWARatioTokenOptions:
"""Get all the options to generate ratio count."""
raw_numerator = self._front_end_data['search_term']
raw_denominator = self._front_end_data['search_term_denominator']
if self._front_end_data['input_type'] == 'Strings':
token_type = RWATokenType.string
numerator_token = raw_numerator.split(",")
denominator_token = raw_denominator.split(",")
elif self._front_end_data['input_type'] == 'Regex':
token_type = RWATokenType.regex
numerator_token = raw_numerator.split(",")
denominator_token = raw_denominator.split(",")
elif self._front_end_data['input_type'] == 'Words':
token_type = RWATokenType.word
numerator_token = [token.strip()
for token in raw_numerator.split(",")]
denominator_token = [token.strip()
for token in raw_denominator.split(",")]
else:
raise ValueError("invalid token type from front end")
# Pack data in a data frame.
token_frame = pd.DataFrame(
data={
"numerator": numerator_token,
"denominator": denominator_token,
}
)
return RWARatioTokenOptions(token_type=token_type,
token_frame=token_frame)
def _get_average_token_options(self) -> RWAAverageTokenOptions:
"""Get all the options to generate average count."""
# the unprocessed token
raw_token = self._front_end_data['search_term']
if self._front_end_data['input_type'] == 'Strings':
token_type = RWATokenType.string
tokens = raw_token.split(',')
elif self._front_end_data['input_type'] == 'Regex':
token_type = RWATokenType.regex
tokens = raw_token.split(',')
elif self._front_end_data['input_type'] == 'Words':
token_type = RWATokenType.word
tokens = [token.strip() for token in raw_token.split(',')]
else:
raise ValueError("invalid token type from front end")
return RWAAverageTokenOptions(token_type=token_type, tokens=tokens)
def _get_window_option(self) -> RWAWindowOptions:
"""Get all the option for windows."""
if self._front_end_data['window_type'] == 'Characters':
window_unit = WindowUnitType.letter
elif self._front_end_data['window_type'] == 'Words':
window_unit = WindowUnitType.word
elif self._front_end_data['window_type'] == 'Lines':
window_unit = WindowUnitType.line
else:
raise ValueError("invalid window unit from front end")
window_size = int(self._front_end_data['window_size'])
return RWAWindowOptions(window_size=window_size,
window_unit=window_unit)
def _get_milestone(self) -> Optional[List[str]]:
"""Get the milestone string from front end and split it into words."""
if 'enable_milestone' not in self._front_end_data:
return None
else:
raw_mile_stones = self._front_end_data['milestone']
return [mile_stone.strip()
for mile_stone in raw_mile_stones.split(",")]
def _get_passage_file_id(self) -> int:
"""Get the file id for the passage to run rolling window."""
return load_file_manager().get_active_files()[0].id
def _get_plot_option(self) -> RWAPlotOptions:
"""Get the plot option from front end."""
individual_points = True if 'show_points' \
in self._front_end_data else False
black_white = True if 'black_and_white' \
in self._front_end_data else False
return RWAPlotOptions(individual_points=individual_points,
black_white=black_white)
def options_from_front_end(self) -> RWAFrontEndOptions:
"""Pack all the front end options together."""
if self._front_end_data['calculation_type'] == 'Rolling Ratio':
return RWAFrontEndOptions(
average_token_options=None,
ratio_token_options=self._get_ratio_token_options(),
window_options=self._get_window_option(),
plot_options=self._get_plot_option(),
milestone=self._get_milestone(),
passage_file_id=self._get_passage_file_id(),
text_color=self._front_end_data["text_color"]
)
elif self._front_end_data['calculation_type'] == 'Rolling Average':
return RWAFrontEndOptions(
average_token_options=self._get_average_token_options(),
ratio_token_options=None,
window_options=self._get_window_option(),
plot_options=self._get_plot_option(),
milestone=self._get_milestone(),
passage_file_id=self._get_passage_file_id(),
text_color=self._front_end_data["text_color"]
)
else:
raise ValueError("invalid count type from front end")
|
mit
| 428,797,673,821,050,750
| 34.981567
| 78
| 0.615907
| false
| 4.053998
| false
| false
| false
|
alexandresobolevski/yahoo_ff
|
yahoo_ff/tools/scrapingTools.py
|
1
|
2687
|
from urllib.request import urlopen
import time
import numpy as np
BASE_URL = 'https://ca.finance.yahoo.com/'
powers = {'%': 10 ** (-2), 'M': 10 ** 6, 'B': 10 ** 9, 'T': 10 ** 12}
def getUnixTime (dateTime):
return int(time.mktime(dateTime.timetuple()))
def parse_powers(x):
power = x[-1]
if (power in powers.keys()):
return float(x[:-1]) * powers[power]
else :
return x
def float_or_none(x):
x = x.replace(',','')
try:
        # accounting-style negative, e.g. "(1,000)" -> -1000.0
        if x[0] == '(' and x[-1] == ')':
            return -float(x[1:-1])
else:
return float(x)
except: return None
def scrape_report(source_code, information):
return parse_table(find_section(source_code, information))
def get_annual_is_url(stock):
return BASE_URL + '/q/is?s=' + stock + '&annual'
def get_quarterly_is_url(stock):
return BASE_URL + '/q/is?s=' + stock
def get_annual_bs_url(stock):
return BASE_URL + '/q/bs?s=' + stock + '&annual'
def get_quarterly_bs_url(stock):
return BASE_URL + '/q/bs?s=' + stock
def get_annual_cf_url(stock):
return BASE_URL + '/q/cf?s=' + stock + '&annual'
def get_quarterly_cf_url(stock):
return BASE_URL + '/q/cf?s=' + stock
def get_stockinfo_url(stock):
return BASE_URL + '/q/pr?s=' + stock + '+Profile'
def get_keystats_url(stock):
return BASE_URL + '/q/ks?s=' + stock
def get_source_code(url):
return urlopen(url).read().decode()
def parse_table(source_code):
source_code = source_code.split('</td></tr>')[0]
source_code = source_code.replace('<strong>', '')
source_code = source_code.replace('</strong>', '')
source_code = source_code.replace('\n', '')
source_code = source_code.replace(' ', '')
source_code = source_code.replace('<td align="right">','')
source_code = source_code.replace(' ', '')
source_code = source_code.split('</td>')
source_code = filter(None, source_code)
return [float_or_none(x.replace(',', '')) for x in source_code]
def find_section(source_code, section_name):
try:
return source_code.split(section_name)[1]
except:
print('failed acquiring ' + section_name)
def scrape_company_infos(source_code, field):
return [source_code.split(field+':')[1].split('</td>')[1].replace('</a>','').split('>')[-1]]
def scrape_key_stats(source_code, field):
try:
return [parse_powers(source_code.split(field)[1].split('</td></tr>')[0].replace('</span>', '').split('>')[-1])]
except:
return [np.nan]
def get_current_price(source_code):
return {'Price': [float_or_none(source_code.split('time_rtq_ticker')[1].split('span')[1].split('>')[1].split('<')[0])]}
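# Quick sanity-check sketch for the parsing helpers (not part of the original
# module); the input values are made up.
if __name__ == '__main__':
    print(parse_powers('1.5B'))      # 1500000000.0
    print(parse_powers('2.3%'))      # ~0.023
    print(float_or_none('1,234.5'))  # 1234.5
    print(float_or_none('(1,000)'))  # -1000.0 (accounting-style negative)
    print(float_or_none('N/A'))      # None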
|
mit
| 3,454,508,166,530,037,000
| 29.885057
| 123
| 0.599181
| false
| 3.036158
| false
| false
| false
|
totoro-zhang/hello-word
|
spider/distributespider/myfirstSpider_URLManager.py
|
1
|
2465
|
#coding:utf-8
import pickle
import hashlib
class UrlManager(object):
def __init__(self):
#self.new_urls = set()
#self.old_urls = set()
        self.new_urls = self.load_progress('new_urls.txt')  # URLs not yet crawled
        self.old_urls = self.load_progress('old_urls.txt')  # URLs already crawled
def has_new_url(self):
'''
        Check whether there are any uncrawled URLs left.
:return:
'''
return self.new_url_size() != 0
def get_new_url(self):
'''
        Get one uncrawled URL and move it to the crawled set.
:return:
'''
new_url = self.new_urls.pop()
m = hashlib.md5()
m.update(new_url.encode("utf8"))
self.old_urls.add(m.hexdigest()[8:-8])
return new_url
def add_new_url(self,url):
'''
        Add a single new URL to the set of uncrawled URLs.
:return:
'''
if url is None:
return
m = hashlib.md5()
m.update(url.encode("utf8"))
url_md5 = m.hexdigest()[8:-8]
if url not in self.new_urls and url_md5 not in self.old_urls:
self.new_urls.add(url)
def add_new_urls(self,urls):
'''
        Add new URLs to the set of uncrawled URLs.
        :param urls: a collection of URLs
:return:
'''
if urls is None or len(urls)== 0 :
return
for url in urls:
self.add_new_url(url)
def new_url_size(self):
'''
        Get the size of the uncrawled URL set.
:return:
'''
return len(self.new_urls)
def old_url_size(self):
'''
        Get the size of the already-crawled URL set.
:return:
'''
return len(self.old_urls)
    def save_progress(self, path, data):
        '''
        Save crawl progress to a local file.
        :param path: file path
        :param data: data to persist
:return:
'''
        with open(path, 'wb') as f:
            pickle.dump(data, f)
def load_progress(self,path):
'''
        Load crawl progress from a local file.
        :param path: file path
        :return: the restored set, or an empty set if the file is missing
        '''
        print('[+] Loading progress from file: %s' % path)
        try:
            with open(path, 'rb') as f:
                tmp = pickle.load(f)
return tmp
except:
            print('[!] No progress file found, creating: %s' % path)
return set()
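# Small usage sketch (not part of the original spider); the URLs are placeholders.
# load_progress() falls back to empty sets when no progress files exist, so this
# runs without anything on disk.
if __name__ == '__main__':
    manager = UrlManager()
    manager.add_new_urls(['http://example.com/a', 'http://example.com/b'])
    while manager.has_new_url():
        print(manager.get_new_url())
    print('crawled so far:', manager.old_url_size())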
|
unlicense
| -7,736,062,366,020,673,000
| 22.833333
| 69
| 0.46264
| false
| 2.983979
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_operation_status_operations.py
|
1
|
5447
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationStatusOperations:
"""OperationStatusOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storagesync.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
location_name: str,
workflow_id: str,
operation_id: str,
**kwargs
) -> "_models.OperationStatus":
"""Get Operation status.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param location_name: The desired region to obtain information from.
:type location_name: str
:param workflow_id: workflow Id.
:type workflow_id: str
:param operation_id: operation Id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatus, or the result of cls(response)
:rtype: ~azure.mgmt.storagesync.models.OperationStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
'workflowId': self._serialize.url("workflow_id", workflow_id, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/locations/{locationName}/workflows/{workflowId}/operations/{operationId}'} # type: ignore
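    # Rough call sketch (not generated code; the resource names are placeholders
    # and the attribute name ``operation_status`` is assumed from the usual
    # AutoRest client layout):
    #
    #     status = await client.operation_status.get(
    #         resource_group_name='my-rg', location_name='eastus',
    #         workflow_id='<workflow-id>', operation_id='<operation-id>')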
|
mit
| 5,413,257,254,874,317,000
| 48.072072
| 217
| 0.665504
| false
| 4.4141
| true
| false
| false
|