repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
obi-two/Rebelion | data/scripts/templates/object/building/poi/shared_anywhere_rebel_camp_small_2.py | Python | mit | 458 | 0.045852 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/sha | red_anywhere_rebel_camp_small_2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return res | ult |
wndias/bc.repository | script.module.urlresolver/lib/urlresolver/plugins/usersfiles.py | Python | gpl-2.0 | 2,201 | 0.003635 | # -*- coding: UTF-8 -*-
"""
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public Li | cense for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import jsunpack
f | rom urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class UsersFilesResolver(UrlResolver):
name = "UsersFiles"
domains = ["usersfiles.com"]
pattern = '(?://|\.)(usersfiles\.com)/(?:embed-)?([0-9a-zA-Z/]+)'
def __init__(self):
self.net = common.Net()
self.net.set_user_agent(common.IE_USER_AGENT)
self.headers = {'User-Agent': common.IE_USER_AGENT}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
match = re.search('<script[^>]*>(eval.*?)</script>', html, re.DOTALL)
if match:
js_data = jsunpack.unpack(match.group(1))
stream_url = re.findall('<param\s+name="src"\s*value="([^"]+)', js_data)
stream_url += re.findall('file\s*:\s*[\'|\"](.+?)[\'|\"]', js_data)
stream_url = [i for i in stream_url if not i.endswith('.srt')]
if stream_url:
return stream_url[0]
raise ResolverError('Unable to find userfiles video')
def get_url(self, host, media_id):
return 'http://usersfiles.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
|
yamaguchiyuto/icwsm15 | indegree_distribution.py | Python | mit | 510 | 0.009804 | import sys
filepath = sys | .argv[1]
delim = '\t'
if len(sys.argv) > 2:
delim = sys.argv[2]
indegrees = {}
for line in open(filepath):
entry = line.rstrip().split(delim)
src = entry[0]
dst = entry[1]
tag = entry[2]
if not dst in | indegrees: indegrees[dst] = 0
indegrees[dst] += 1
distribution = {}
for nid in indegrees:
d = indegrees[nid]
if not d in distribution: distribution[d] = 0
distribution[d] += 1
for d, count in sorted(distribution.items()):
print d,count
|
setrar/INF1069 | C.PYTHON/mysql.py | Python | mit | 475 | 0.008421 | #!/usr/bin/python
# Script pour telecharger City
import MySQLdb
file_ = open('city.csv', 'w')
file_.write ('city_id,city,country_id\n')
db = MySQLdb.connect( user='etudiants',
| passwd='etudiants_1',
host='192.168.99.100',
db='sakila')
cur = db.cursor()
cur.execute("SELECT * FROM city")
|
for row in cur.fetchall():
file_.write(str(row[0])+','+ row[1]+','+ str(row[2])+'\n')
db.close()
file_.close()
|
damaggu/SAMRI | samri/examples/2017-09_aic-pom.py | Python | gpl-3.0 | 774 | 0.040052 | from os import path
import matplotlib.pyplot as plt
from samri.plotting.aggregate import registration_qc
from samri.typesetting import inline_anova
this_path = path.dirname(path.realpath(__file__))
data_dir = path.join(this_path,"../../example_data/")
df_path = path.join(data_dir,"f_reg_quality.csv")
plt.style.use(path.join(this_path,"dark.conf"))
registration_qc(df_path,
value | ={"similarity":"Similarity"},
group={"subject":"Subject"},
repeat={"session":"Session"},
extra={"acquisition":"Type"},
save_as="2017-09_aic-pom.pdf",
show=False,
samri_style=False,
extra_cmap=["#FFFFFF","#000000"],
cmap=["#EE1111","#11DD11","#1111FF","#CCCC22","#AA11AA"],
values_rename={
"sub":"Subject",
"EPI_CBV_chr_longSOA":"longer",
"EPI_CBV_jb_long":"shorter",
| },
)
|
XiangYz/webscraper | test10.py | Python | lgpl-2.1 | 368 | 0.019022 | import requests
params = {'username':'Ryan', 'password':'password'}
r = requests.post("http://pythonscraping.com/pages/cookies/welcome.php", params)
print("Cookie is set to:")
print(r.cookies.get_dict())
print("-------------")
print("Going to profile page...")
r = requests.get("http://pythonscraping.c | om/pages/cookies/p | rofile.php", cookies = r.cookies)
print(r.text) |
Ralev93/Clan-site | clan_site/battles/forms.py | Python | gpl-3.0 | 1,375 | 0.022545 | from django import forms
from django.contrib.admin.widgets import AdminDateWidget
from django.forms import ModelForm
from basic.models import *
from battles.models import *
from itertools import chain
def getMemberAsOptions():
members = Member.objects.all()
return [(member.id, member.get_username()) for member in members]
def getTerritoriesAsOptions():
territories = Territory.objects.all()
return [(terittory.id, terittory.get_name()) for terittory in territories]
# todo: modelForm!!!
class Battle(forms.Form):
status = forms.ChoiceField(Battle.STATUS_CHOICES)
date = forms.DateField(initial=datetime.date.today, widget=forms.SelectDateWidget)
class TerritoryBattleForm(Battle):
assigned_users = forms.MultipleChoiceField(choices=getMemberAsOptions(), widget=forms.CheckboxSelectMultiple())
planet=forms.ChoiceField(Territory.PLANET_CHOICES)
territory=forms.ChoiceField(choices=getTerr | itoriesAsOptions())
class ClanWarForm(Battle):
clan_war_type | = forms.ChoiceField(ClanWar.CLAN_WAR_TYPES)
result = forms.ChoiceField(ClanWar.RESULT_CHOICES)
enemy_clans=forms.CharField(max_length=50)
class HypothesisForm(forms.Form):
def __init__(self, *args, **kwargs):
super(HypothesisForm, self).__init__(*args, **kwargs)
for i in Hypothesis.OPPONENTS:
self.fields['opponent_' + str(i)] = forms.ChoiceField(Hypothesis.STAR_CHOICES) |
anhstudios/swganh | data/scripts/templates/object/building/poi/shared_corellia_solitude_medium3.py | Python | mit | 456 | 0.046053 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/sha | red_corellia_solitude_medium3.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS | ####
return result |
pashinin-com/pashinin.com | src/core/tests/__init__.py | Python | gpl-3.0 | 1,556 | 0 | import requests
import gzip
from io import BytesIO
def validate_html(html, content_type):
# 'node_modules/vnu-jar/build/dist/vnu.jar'
vnu_url = 'http://vnu:8888'
with BytesIO() as buf:
with gzip.GzipFile(fileobj=buf, mode='wb') as gzipper:
gzipper.write(html)
gzippeddata = buf.getvalue()
r = requests.post(
vnu_url,
params={
'out': 'gnu',
'level': 'error',
},
headers={
'Content-Type': content_type,
'Accept-Encoding': 'gzip',
| 'Content-Encoding': 'gzip',
'Content-Length': str(len(gzippeddata)),
},
data=gzippeddata
)
return r.text.strip()
def validate_process(res, html):
# t = validate_html(html, ct)
if res == '':
return
# print(t)
print('- - - - -')
res = ''
errors = res.split('\n')
for e in errors:
error_string = e.strip(':')
rng, cls = error_string.split(':')[0:2]
| msg = error_string[len(rng+cls)+2:].strip()
# for skip in skips:
# if skip in msg:
# continue
print(rng, msg)
linePos1, linePos2 = rng.split('-')
line1, pos1 = map(int, linePos1.split('.'))
line2, pos2 = map(int, linePos2.split('.'))
# for line in itertools.islice(r.content.split('\n'), line1, line2):
res += error_string
for line in html.splitlines()[max(0, line1-1):line2]:
# res += line + '\n'
print(line)
print('')
|
joelfiddes/toposubv2 | topoMAPP/getERA/era_prep.py | Python | gpl-3.0 | 1,384 | 0.013006 | #!/usr/bin/env python
""" This module preprocesses ERA-Interim data, units, accumulated to instantaneous values and timestep interpolation for 6 h to 3 h va | lues.
Example:
as import:
from getERA import era_prep as prep
prep.main(wd, config['main']['startDate'], config['main']['endDate'])
Attributes:
wd = "/home/joel/sim/topomap_test/"
plotshp = TRUE
Todo:
"""
path2script = "./rsrc/toposcale_pre2.R"
# main
def main(wd, startDate, endDate):
"""Main entry point for the script."""
run_rscript_fileout(path2script,[wd | , startDate, endDate])
# functions
def run_rscript_stdout(path2script , args):
""" Function to define comands to run an Rscript. Returns an object. """
import subprocess
command = 'Rscript'
cmd = [command, path2script] + args
print("Running:" + str(cmd))
x = subprocess.check_output(cmd, universal_newlines=True)
return(x)
def run_rscript_fileout(path2script , args):
""" Function to define comands to run an Rscript. Outputs a file. """
import subprocess
command = 'Rscript'
cmd = [command, path2script] + args
print("Running:" + str(cmd))
subprocess.check_output(cmd)
# calling main
if __name__ == '__main__':
import sys
wd = sys.argv[1]
startDate = sys.argv[2]
endDate = sys.argv[3]
main(wd, startDate, endDate)
|
antoinecarme/pyaf | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingMedian_BestCycle_AR.py | Python | bsd-3-clause | 163 | 0.04908 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDiff | erence'] , ['MovingMedian'] , ['BestCycle'] , ['AR'] | ); |
Rambatino/CHAID | CHAID/__init__.py | Python | apache-2.0 | 239 | 0 | from .split import Split
from .tree import Tree
from .node import Node
from .column import NominalColumn, OrdinalColumn, ContinuousColumn
from .stats import Stats
from .invali | d_split | _reason import InvalidSplitReason
__version__ = "5.3.0"
|
icatproject/python-icat | tests/test_06_ingest.py | Python | apache-2.0 | 11,631 | 0.006276 | """Test icatdump and icatingest.
"""
from subprocess import CalledProcessError
import pytest
import icat
import icat.config
from icat.query import Query
from conftest import DummyDatafile, gettestdata, getConfig, callscript
# Test input
ds_params = str(gettestdata("ingest-ds-params.xml"))
datafiles = str(gettestdata("ingest-datafiles.xml"))
@pytest.fixture(scope="module")
def client(setupicat):
client, conf = getConfig(confSection="acord", ids="mandatory")
client.login(conf.auth, conf.credentials)
return client
@pytest.fixture(scope="module")
def cmdargs(setupicat):
_, conf = getConfig(confSection="acord", ids="mandatory")
return conf.cmdargs + ["-f", "XML"]
@pytest.fixture(scope="function")
def dataset(client):
"""A dataset to be used in the test.
The dataset is not created by the fixture, it is assumed that the
test does it. The dataset will be eventually be deleted after the
test.
"""
inv = client.assertedSearch("Investigation [name='10100601-ST']")[0]
dstype = client.assertedSearch("DatasetType [name='raw']")[0]
dataset = client.new("dataset",
name="e208343", complete=False,
investigation=inv, type=dstype)
yield dataset
try:
ds = client.searchMatching(dataset)
dataset.id = ds.id
except icat.SearchResultError:
# Dataset not found, maybe the test failed, nothing to
# clean up then.
pass
else:
# If any datafile has been uploaded (i.e. the location is
# not NULL), need to delete it from IDS first. Any other
# datafile or dataset parameter will be deleted
# automatically with the dataset by cascading in the ICAT
# server.
query = Query(client, "Datafile",
conditions={"dataset.id": "= %d" % dataset.id,
"location": "IS NOT NULL"})
client.deleteData(client.search(query))
client.delete(dataset)
# Test datafiles to be created by test_ingest_datafiles:
testdatafiles = [
{
'dfname': "e208343.dat",
'size': 394,
'mtime': 1286600400,
},
{
'dfname': "e208343.nxs",
'size': 52857,
'mtime': 1286600400,
},
]
def verify_dataset_params(client, dataset, params):
query = Query(client, "DatasetParameter",
conditions={"dataset.id": "= %d" % dataset.id},
includes={"type"})
ps = client.search(query)
assert len(ps) == len(params)
values = { (p.type.name, p.numericValue, p.type.units) for p in ps }
assert values == params
def test_ingest_dataset_params(client, dataset, cmdargs):
"""Ingest a file setting some dataset parameters.
"""
dataset.create()
args = cmdargs + ["-i", ds_params]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_throw(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now place a duplicate object in the way.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params]
# FIXME: should inspect stderr and verify ICATObjectExistsError.
with pytest.raises(CalledProcessError) as err:
callscript("icatingest.py", args)
# Verify that the params have been set. The exceptions should
# have been raised while trying to ingest the second parameter.
# The first one (Magnetic field) should have been created and
# Reactor power should still have the value set above.
verify_data | set_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW")
})
def test_ingest_duplicate_ignore(client, dataset, | cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now ignore the duplicate.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "IGNORE"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_check_err(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but use CHECK which fails due to mismatch.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "CHECK"]
# FIXME: should inspect stderr and verify ICATObjectExistsError.
with pytest.raises(CalledProcessError) as err:
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 5.0, "MW")
})
def test_ingest_duplicate_check_ok(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now it matches, so CHECK should return ok.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=10.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "CHECK"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
def test_ingest_duplicate_overwrite(client, dataset, cmdargs):
"""Ingest with a collision of a duplicate object.
Same test as above, but now overwrite the old value.
"""
dataset.create()
ptype = client.assertedSearch("ParameterType [name='Reactor power']")[0]
p = client.new("datasetParameter", numericValue=5.0,
dataset=dataset, type=ptype)
p.create()
args = cmdargs + ["-i", ds_params, "--duplicate", "OVERWRITE"]
callscript("icatingest.py", args)
verify_dataset_params(client, dataset, {
("Magnetic field", 5.3, "T"),
("Reactor power", 10.0, "MW"),
("Sample temperature", 293.15, "K")
})
# Minimal example, a Datafile featuring a string.
ingest_data_string = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datafile>
<name>dup_test_str.dat</name>
<dataset ref="Dataset_001"/>
</datafile>
</data>
</icatdata>
"""
# A Datafile featuring an int.
ingest_data_int = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name="10100601-ST"
investigation.visitId="1.1-N"/>
<datafile>
<fileSize>42</fileSize>
<name>dup_test_int.dat</name>
<dataset ref="Dataset_001"/>
</datafile>
</data>
</icatdata>
"""
# A Dataset featuring a boolean.
ingest_data_boolean = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<dataset id="Dataset_001">
<complete>false</complete>
<name>e208343</name>
<investigation name="10100601-ST" visitId="1.1-N"/>
<type name="raw"/>
</dataset>
</data>
</icatdata>
"""
# A DatasetParameter featuring a float.
ingest_data_float = """<?xml version="1.0" encoding="utf-8"?>
<icatdata>
<data>
<datasetRef id="Dataset_001"
name="e208343"
investigation.name= |
rajalokan/nova | nova/tests/unit/test_identity.py | Python | apache-2.0 | 3,137 | 0 | # Copyright 2017 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file ex | cept in compliance with the License. You may obtain
# a copy of the Li | cense at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from keystoneauth1 import exceptions as kse
import webob
from nova.api.openstack import identity
from nova import test
class FakeResponse(object):
"""A basic response constainer that simulates requests.Response.
One of the critical things is that a success error code makes the
object return true.
"""
def __init__(self, status_code, content=""):
self.status_code = status_code
self.content = content
def __bool__(self):
# python 3
return self.__nonzero__()
def __nonzero__(self):
# python 2
return self.status_code < 400
@property
def text(self):
return self.content
class IdentityValidationTest(test.NoDBTestCase):
@mock.patch('keystoneauth1.session.Session.get')
def test_good_id(self, get):
get.return_value = FakeResponse(200)
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_no_project(self, get):
get.return_value = FakeResponse(404)
self.assertRaises(webob.exc.HTTPBadRequest,
identity.verify_project_id,
mock.MagicMock(), "foo")
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_unknown_id(self, get):
get.return_value = FakeResponse(403)
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_unknown_error(self, get):
get.return_value = FakeResponse(500, "Oh noes!")
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
get.assert_called_once_with(
'/v3/projects/foo',
endpoint_filter={'service_type': 'identity'},
raise_exc=False)
@mock.patch('keystoneauth1.session.Session.get')
def test_early_fail(self, get):
get.side_effect = kse.EndpointNotFound()
self.assertTrue(identity.verify_project_id(mock.MagicMock(), "foo"))
|
dennybaa/st2 | st2api/st2api/signal_handlers.py | Python | apache-2.0 | 941 | 0 | # Licensed | to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in complianc | e with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
__all__ = [
'register_api_signal_handlers'
]
def register_api_signal_handlers(handler_func):
signal.signal(signal.SIGINT, handler_func)
|
mou4e/zirconium | tools/perf/page_sets/key_hit_test_cases.py | Python | bsd-3-clause | 1,778 | 0.004499 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class KeyHitTestCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(KeyHitTestCasesPage, self).__init__(
url=url, page_set=page_set, credentials_path = 'data/credentials.json')
self.user_agent_ | type = 'mobile'
def RunNavigateSteps(self, action_runner):
super(KeyHitTestCasesPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
action_runner.Wait(2)
for _ in xrange(100):
self.TapButton(action_runner)
class PaperCalculatorHitTest(KeyHitTestCasesPage):
def __init__(self, page_set):
super(PaperCalculatorHitTest, self).__init__(
# Generated from https:// | github.com/zqureshi/paper-calculator
# vulcanize --inline --strip paper-calculator/demo.html
url='file://key_hit_test_cases/paper-calculator-no-rendering.html',
page_set=page_set)
def TapButton(self, action_runner):
interaction = action_runner.BeginInteraction(
'Action_TapAction')
action_runner.TapElement(element_function='''
document.querySelector(
'body /deep/ #outerPanels'
).querySelector(
'#standard'
).shadowRoot.querySelector(
'paper-calculator-key[label="5"]'
)''')
interaction.End()
class KeyHitTestCasesPageSet(page_set_module.PageSet):
def __init__(self):
super(KeyHitTestCasesPageSet, self).__init__(
user_agent_type='mobile')
self.AddUserStory(PaperCalculatorHitTest(self))
|
AKSW/LODStats | test/test_lodstats.py | Python | gpl-3.0 | 1,647 | 0.001821 | import unittest
from requests.exceptions import HTTPError
import lodstats
from lodstats import RDFStats
from requests import HTTPError
from . import helpers
http_base = he | lpers.webserver(helpers.resources_path)
testfile_path = helpers.resources_path
class LodstatsTest(unittest.TestCase):
d | ef setUp(self):
lodstats.stats.stats_to_do = []
lodstats.stats.results = {}
def test_remote_bz2(self):
uri = http_base + 'heb.rdf.bz2'
rdfstats = RDFStats(uri)
#rdfstats.set_callback_function_download(test_callback_download)
#rdfstats.set_callback_function_extraction(test_callback_extraction)
rdfstats.start_statistics()
assert(len(rdfstats.get_stats_results()) > 5)
def test_local_rdf(self):
uri = 'file://' + testfile_path + 'heb-original.rdf'
rdfstats = RDFStats(uri)
rdfstats.start_statistics()
assert(len(rdfstats.voidify("turtle")) > 5)
def test_remote_tar(self):
uri = http_base + 'heb.nt.tgz'
rdfstats = RDFStats(uri)
rdfstats.start_statistics()
assert(len(rdfstats.get_stats_results()) > 5)
def test_404_remote_tar_gz(self):
import tarfile
uri = http_base + 'DOESNOTEXIST.nt.tgz'
with self.assertRaises(HTTPError):
rdfstats = RDFStats(uri)
rdfstats.start_statistics()
def test_remote_not_usual_extension(self):
uri = http_base + "heb.nt.tgz?accessType=DOWNLOAD"
rdfstats = RDFStats(uri, format="nt")
rdfstats.start_statistics()
assert(len(rdfstats.voidify("turtle")) > 5)
# FIXME add test for sitemaps
|
tgalal/inception | inception/argparsers/makers/submakers/submaker_supersu.py | Python | gpl-3.0 | 4,484 | 0.006467 | from .submaker import Submaker
import zipfile
import os
import shutil
import logging
logger = logging.getLogger(__name__ )
class SuperSuSubmaker(Submaker):
def make(self, workDir):
supersuZipProp = self.getTargetConfigProperty("root.methods.supersu.path")
assert supersuZipProp.getValue(), "Must set %s to the supersu zip file" % supersuZipProp.getKey()
includeApk = self.getTargetConfigValue("root.methods.supersu.include_apk", True)
includeArchs = set(self.getTargetConfigValue("root.methods.supersu.include_archs", []))
superSuTargetRelativePath = "supersu"
supersuTargetPath = os.path.join(workDir, superSuTargetRelativePath)
postinstFilePath = os.path.join(supersuTargetPath, "supersu_installer_includer")
supersuOriginalUpdatescriptPath = os.path.join(supersuTargetPath, "supersu_installer.sh")
newSuperSuZipPath = os.path.join(supersuTargetPath, "supersu.zip")
superSuZipTmpExtract = "/tmp/supersu.zip"
superSuUpdatescriptTmpExtract = "/tmp/supersu_installer.sh"
superuserApkPath = os.path.join("common", "Superuser.apk")
with self.newtmpWorkDir() as tmpDir:
with zipfile.ZipFile(supersuZipProp.resolveAsRelativePath(), "r") as z:
z.extractall(tmpDir)
os.mkdir(os.path.join(workDir, "supersu"))
archs = set(
[f for f in os.listdir(tmpDir) if not f in ("common", "META-INF")]
)
unsupportedArchs = includeArchs.difference(archs)
if len(unsupportedArchs):
unsupportedArchs = list(unsupportedArchs)
raise ValueError("Can't find archs: [%s] in supersu" % (", ".join(unsupportedArchs)))
targetArchs = includeArchs if len(includ | eArchs) else archs
newSuperSuZip = zipfile.ZipFile(newSuperSuZipPath, "w")
| for arch in targetArchs:
self.__addDirToZip(newSuperSuZip, os.path.join(tmpDir, arch), arch)
if not includeApk:
os.remove(os.path.join(tmpDir, superuserApkPath))
self.__addDirToZip(newSuperSuZip, os.path.join(tmpDir, "common"), "common")
if self.getMaker().getConfig().isMakeable("update.busybox"):
#process file, with busybox onboard in assumption
with open(os.path.join(tmpDir, "META-INF/com/google/android/update-binary"), "r") as f:
with open(supersuOriginalUpdatescriptPath, "w") as targetF:
for l in f.readlines():
if l.startswith("#!"):
targetF.write("#!" + self.getTargetConfigValue("root.methods.supersu.sh", "/system/bin/sh") + "\n")
else:
targetF.write(l)
else:
shutil.copy(os.path.join(tmpDir, "META-INF/com/google/android/update-binary"), supersuOriginalUpdatescriptPath)
postInstscript = "ui_print(\"Installing SuperSU..\");\n"
postInstscript += "run_program(\"%s\", \"1\", \"stdout\", \"%s\");" % (superSuUpdatescriptTmpExtract, superSuZipTmpExtract)
with open(postinstFilePath, "w") as postinstFile:
postinstFile.write(postInstscript)
superSuConfig = supersuZipProp.getConfig()
currPostInst = superSuConfig.get("script.post", [], directOnly=True)
currPostInst.append(postinstFilePath)
superSuConfig.set("update.script.post", currPostInst)
self.setValue("update.files.add." + newSuperSuZipPath.replace(workDir, "").replace(".", "\.") , {
"destination": superSuZipTmpExtract
})
self.setValue("update.files.add." + supersuOriginalUpdatescriptPath.replace(workDir, "").replace(".", "\."), {
"destination": superSuUpdatescriptTmpExtract,
"mode": "0755",
"uid": "0",
"gid": "0"
})
def __addDirToZip(self, zipFile, dirPath, zipRoot):
zipFile.write(dirPath, zipRoot)
for f in os.listdir(dirPath):
src = os.path.join(dirPath, f)
dest = os.path.join(zipRoot, f)
if os.path.isdir(src):
self.__addDirToZip(zipFile, src, dest)
else:
zipFile.write(src, dest)
|
zacharyvoase/relations | test/test_joins.py | Python | unlicense | 867 | 0 | import relations
employees = relations.Relation('name', 'emp_id', 'dept_name')
employees.add(name='Harry', emp_id=3415, dept_name='Finance')
employees.add(name='Sally', emp_id=2241, dept_name='Sales')
employees.add(name='George', emp_id=3401, dept_name='Finance')
employees.add(name='Harriet', emp_id=2202, dept_name='Sales')
departments = relations.Relation('dept_name', 'manager')
departments.add(dept_name='Finance', manager='George')
| departments.add(dept_name='Sales', manager='Harriet')
departments.add(dept_name='Production', manager='Charles')
def test_natural_join():
joined = employees.natural_join(departments)
assert len(joined) == 4
def test_natural_join_on_disjoint_relations_is_cartesian_product():
joined = employees.project('name', 'emp_id').natur | al_join(departments)
assert len(joined) == (len(employees) * len(departments))
|
jorisvanzundert/sfsf | sfsf/sfsf_config.py | Python | mit | 717 | 0.033473 | import os
PRODUCTION = 'merged-corpus'
DEVELOPMENT = 'test'
ENVIRONMENT = PRODUCTION
EPUB = 'epub'
EPUB_DIRNAME = EPUB
TXT = 'txt'
TXT_DIRNAME = TXT
def set_env( env=PRODUCTION ):
global ENVIRONMENT
| ENVIRONMENT = env
def get_data_dir():
path_to_here = os.path.dirname( os.path.abspath( __file__ ) )
if ENVIRONMENT == DEVELOPMENT:
data_dir = os.path.join( path_to_here, '../data/{t}'.format( t=DEVELOPMENT ) )
else:
data_dir = os.path.join( path_to_here, '../../docker_volume/{p}'.format( p=PRODUCTION ) )
return data_dir
def get_epub_dir():
return os.path.join( get_data_dir(), EPUB_DIRNAME )
def get_txt_dir():
return os.path.join( get_ | data_dir(), TXT_DIRNAME )
|
Chandra-MARX/marx-test | tests/source.py | Python | gpl-2.0 | 10,336 | 0.002032 | '''
|marx| offers several different source shapes. Tests in this module exercise
those sources (except ``SAOSAC``, which is heavily used in
:ref:`sect-tests.PSF` already).
'''
import shutil
import subprocess
import os
from collections import OrderedDict
from marxtest import base
title = 'Sources in |marx|'
tests = ['GeometricSources', 'ImageSource',
#'RayfileSource',
'UserSource']
class GeometricSources(base.MarxTest):
'''This test exercises build-in |marx| sources with different geometric
shapes.
Most source types have parameters, and not all parameters are tested here.
See :ref:`sect-sourcemodels` for a detailed description of source
parameters.
'''
title = 'Build-in | geometric sources'
figures = OrderedDict([('ds9', {'alternative': 'Six PSFs.',
| 'caption': '`ds9`_ image of the simulated PSFs in alphabetical order (beta distribution, disk, disk with hole, Gauss, line, and point).'})
])
@base.Marx
def step_10(self):
return [{'OutputDir': 'point'},
{'SourceType': 'GAUSS', 'S-GaussSigma': 20,
'OutputDir': 'gauss'},
{'SourceType': 'BETA', 'S-BetaCoreRadius': 10,
'S-BetaBeta': 0.6, 'OutputDir': 'beta'},
{'SourceType': 'DISK',
'S-DiskTheta0': 0, 'S-DiskTheta1': 20,
'OutputDir': 'disk'},
{'SourceType': 'DISK',
'S-DiskTheta0': 10, 'S-DiskTheta1': 20,
'OutputDir': 'diskhole'},
{'SourceType': 'LINE', 'S-LinePhi': 45, 'S-LineTheta': 30,
'OutputDir': 'line'},
]
# more to come for SAOSAC, RAYFILE, SIMPUT, USER
# but first make something work here
@base.Marx2fits
def step_20(self):
dirs = ['point', 'gauss', 'beta', 'disk', 'diskhole',
'line']
return ['--pixadj=EDSER'] * len(dirs), dirs, [d + '.fits' for d in dirs]
@base.Ciao
def step_30(self):
'''ds9 images of the PSF'''
return ['''ds9 -width 800 -height 500 -log -cmap heat *.fits -pan to 4018 4141 physical -match frame wcs -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
class ImageSource(base.MarxTest):
    '''An image can be used as |marx| input. In this case, the intensity of the
    X-ray radiation on that sky is taken to be proportional to the value of the
    image at that point.
    '''
    # Title and figure registry consumed by the marxtest report machinery.
    title = 'Image as source'
    figures = OrderedDict([('ds9', {'alternative': 'The simulated events generally follow the input image, but with significant noise because of the short observation time.',
                                    'caption': '`ds9`_ shows the input image (left) and the simulated event list (right).'})
                           ])
    @base.Python
    def step_0(self):
        '''Make input image
        In this example we use python to make a simple image as input.
        We setup a 3-d box and fill it with an emitting shell. We then
        integrate along one dimension to obtain a collapsed image.
        Physically, this represents the thin shell of a supernova
        explosion.
        '''
        import numpy as np
        from astropy.wcs import WCS
        from astropy.io import fits
        # Actually to make this run faster, we'll do only one quadrant here
        cube = np.zeros((201, 201, 201))
        mg = np.mgrid[0: 201., 0:201, 0:201 ]
        # Distance of every voxel from the corner (0, 0, 0) of the quadrant.
        d = np.sqrt(mg[0, :, :, :]**2 + mg[1, :, :, :]**2 + mg[2, :, :, :]**2)
        # Mark the thin shell (radius 160-170 voxels) as emitting.
        cube[(d > 160.) & (d < 170)] = 1
        # Collapse (integrate) along the line of sight.
        im = cube.sum(axis=0)
        # Now rotate and put the four quarters together
        image = np.zeros((401, 401))
        image[:201, :201] = np.fliplr(np.flipud(im))
        image[:201, 200:] = np.flipud(im)
        image[200:, :201] = np.fliplr(im)
        image[200:, 200:] = im
        # Create a new WCS object.
        w = WCS(naxis=2)
        w.wcs.crpix = [100., 100.]
        # Pixel size of our image shall be 1 arcsec
        w.wcs.cdelt = [1. / 3600., 1. / 3600.]
        w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        # Now, write out the WCS object as a FITS header
        header = w.to_header()
        # header is an astropy.io.fits.Header object. We can use it to create a new
        # PrimaryHDU and write it to a file.
        hdu = fits.PrimaryHDU(header=header, data=image)
        # Save to FITS file
        hdu.writeto(os.path.join(self.basepath, 'input_image.fits'), clobber=True)
    @base.Marx
    def step_1(self):
        '''Run |marx|.
        We run a monoenergetic simulation here for the Si XIII line at 6.65 Ang.
        '''
        # 1.9 keV corresponds to ~6.65 Ang (E[keV] = 12.398 / lambda[Ang]).
        return {'SourceType': "IMAGE", 'S-ImageFile': 'input_image.fits',
                'MinEnergy': 1.9, 'MaxEnergy': 1.9, 'GratingType': 'NONE',
                'OutputDir': 'image'}
    @base.Marx2fits
    def step_2(self):
        # Convert the simulation to a FITS event file with EDSER sub-pixel
        # adjustment: (marx2fits option, input dir, output file).
        return '--pixadj=EDSER', 'image', 'image.fits'
    @base.Ciao
    def step_30(self):
        '''ds9 images of the PSF'''
        # Show the input image next to the simulated events, WCS-free pan/zoom.
        return ['''ds9 -width 800 -height 500 -log -cmap heat input_image.fits image.fits -pan to 4018 4141 physical -zoom 0.5 -sleep 1 -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
class RayfileSource(base.MarxTest):
    '''|marx| is a Monte-Carlo code, thus the exact distribution of photons
    on the sky will be different every time the code is run. Sometimes it
    can be useful to generate a list of photons with position, time and
    energy from the source on the sky and then "observe" the exact same list
    with different instrument configurations so that any differences in the
    result are only due to the different configuration and not to random
    fluctuations in the source.
    In this example, we look at a relatively large, diffuse emission region
    with a very soft spectrum (for simplicity we are using a flat spectrum).
    We compare simulations using ACIS-S and ACIS-I. ACIS-S has a better
    response to soft photons, but some parts of the source may not be in the
    field-of-view; ACIS-I is less efficient for soft photons, but has a
    larger field-of-view.
    '''
    title = 'Using a RAYFILE source'
    figures = OrderedDict([('ds9', {'alternative': 'As described above, ACIS-S shows more photons, but ACIS-I does include more the wings of the Gaussian source distribution',
                                    'caption': '`ds9`_ shows the ACIS-I (left) and ACIS-S image (right). Both sources are generated from the same photon list. Sometimes the same pattern of photons can be seen in both images, but with a few events missing on ACIS-I due to the lower soft response.'})
                           ])
    @base.Marx
    def step_1(self):
        '''Write ray file
        '''
        # DumpToRayFile stores the generated photons instead of observing them;
        # both detector runs below replay this same photon list.
        return {'SourceType': 'GAUSS', 'S-GaussSigma': 300,
                'DumpToRayFile': 'yes', 'MinEnergy': 0.3, 'MaxEnergy': 0.5}
    @base.Marx
    def step_2(self):
        '''ACIS-S'''
        return {'SourceType': 'RAYFILE', 'RayFile': 'marx.output',
                'OutputDir': 'aciss', 'DetectorType': 'ACIS-S'}
    @base.Marx
    def step_3(self):
        '''ACIS-I'''
        return {'SourceType': 'RAYFILE', 'RayFile': 'marx.output',
                'OutputDir': 'acisi', 'DetectorType': 'ACIS-I'}
    @base.Marx2fits
    def step_4(self):
        '''Turn into fits files
        We use the ``EXACT`` setting here to make the comparison simpler.
        The default EDSER (energy-dependent sub-pixel event repositioning)
        shifts photons of the same energy by a different amount for ACIS-S and
        ACIS-I, which would make it harder to compare the resulting images.
        '''
        return ['--pixadj=EXACT', '--pixadj=EXACT'], ['acisi', 'aciss'], ['i.fits', 's.fits']
    @base.Ciao
    def step_30(self):
        '''ds9 images of the PSF'''
        # Side-by-side comparison of the two detector configurations.
        return ['''ds9 -width 800 -height 500 -log -cmap heat i.fits s.fits -pan to 4018 4141 physical -match frame wcs -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
class SimputSource(base.MarxTest):
    """Placeholder for a future SIMPUT-source test; not implemented yet."""
    pass
class UserSource(base.MarxTes |
chemreac/chemreac | chemreac/chemistry.py | Python | bsd-2-clause | 1,640 | 0 | # -*- coding: utf-8 -*-
"""
chemreac.chemistry
==================
This module collects classes useful for describing substances,
reactions and reaction systems. The classes have methods to help
with consistent low-level conversion to numerical parameters of
the model. The classes are from the
`chempy <https | ://pypi.python. | org/pypi/chempy>`_ package.
"""
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
from chempy import Substance, Reaction, ReactionSystem
def mk_sn_dict_from_names(names, **kwargs):
    """
    Build an OrderedDict mapping each name to a ``Substance`` instance.

    Every keyword argument is a sequence aligned with ``names``. For the
    i-th substance, entries whose key is a recognised ``Substance``
    attribute are passed straight to the constructor, while all remaining
    entries are collected into the substance's ``data`` dict.

    Parameters
    ----------
    names: sequence of strings
        names of substances
    **kwargs:
        sequences of corresponding keyword arguments

    Examples
    --------
    >>> d = mk_sn_dict_from_names(
    ...     'ABCD', D=[0.1, 0.2, 0.3, 0.4])
    >>> d  # doctest: +NORMALIZE_WHITESPACE
    OrderedDict([('A', <Substance(name=A, ...>),
    ('B', <Substance(name=B, ...)>), ('C', <Substance(name=C, ...)>),
    ('D', <Substance(name=D, ...)>)])
    >>> d['A'].name
    'A'
    """
    def _ctor_kwargs(idx):
        # Split the idx-th slice of the keyword sequences into constructor
        # attributes and free-form data entries.
        attrs = {}
        extra = {}
        for key, seq in kwargs.items():
            if key in Substance.attrs:
                attrs[key] = seq[idx]
            else:
                extra[key] = seq[idx]
        attrs['data'] = extra
        return attrs

    return OrderedDict(
        (name, Substance(name, **_ctor_kwargs(idx)))
        for idx, name in enumerate(names))
|
bolkedebruin/airflow | airflow/jobs/scheduler_job.py | Python | apache-2.0 | 57,604 | 0.003368 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import time
import warnings
from collections import defaultdict
from datetime import timedelta
from typing import Collection, DefaultDict, Dict, Iterator, List, Optional, Tuple
from sqlalchemy import and_, func, not_, or_, text, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.callbacks.callback_requests import DagCallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.callbacks.pipe_callback_sink import PipeCallbackSink
from airflow.configuration import conf
from airflow.dag_processing.manager import DagFileProcessorAgent
from airflow.executo | rs.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DAG
from airflow.models.dag import DagModel
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
from airflow.sta | ts import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.docs import get_docs_url
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
def _is_parent_process():
"""
Returns True if the current process is the parent process. False if the current process is a child
process started by multiprocessing.
"""
return multiprocessing.current_process().name == 'MainProcess'
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:param scheduler_idle_sleep_time: The number of seconds to wait between
polls of running processors
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:param log: override the default Logger
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
    def __init__(
        self,
        subdir: str = settings.DAGS_FOLDER,
        num_runs: int = conf.getint('scheduler', 'num_runs'),
        num_times_parse_dags: int = -1,
        scheduler_idle_sleep_time: float = conf.getfloat('scheduler', 'scheduler_idle_sleep_time'),
        do_pickle: bool = False,
        log: Optional[logging.Logger] = None,
        processor_poll_interval: Optional[float] = None,
        *args,
        **kwargs,
    ):
        # See the class docstring for parameter semantics; defaults come from
        # the Airflow configuration at import time.
        self.subdir = subdir
        self.num_runs = num_runs
        # In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
        # number of times. This is only to support testing, and isn't something a user is likely to want to
        # configure -- they'll want num_runs
        self.num_times_parse_dags = num_times_parse_dags
        if processor_poll_interval:
            # TODO: Remove in Airflow 3.0
            warnings.warn(
                "The 'processor_poll_interval' parameter is deprecated. "
                "Please use 'scheduler_idle_sleep_time'.",
                DeprecationWarning,
                stacklevel=2,
            )
            # The deprecated parameter, when given, overrides the new one.
            scheduler_idle_sleep_time = processor_poll_interval
        self._scheduler_idle_sleep_time = scheduler_idle_sleep_time
        # How many seconds do we wait for tasks to heartbeat before mark them as zombies.
        self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
        self.do_pickle = do_pickle
        super().__init__(*args, **kwargs)
        if log:
            self._log = log
        # Check what SQL backend we use
        sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
        self.using_sqlite = sql_conn.startswith('sqlite')
        self.using_mysql = sql_conn.startswith('mysql')
        # Set lazily by the run loop; None until the DAG-file processor starts.
        self.processor_agent: Optional[DagFileProcessorAgent] = None
        self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
        if conf.getboolean('smart_sensor', 'use_smart_sensor'):
            compatible_sensors = set(
                map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))
            )
            docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')
            warnings.warn(
                f'Smart sensors are deprecated, yet can be used for {compatible_sensors} sensors.'
                f' Please use Deferrable Operators instead. See {docs_url} for more info.',
                DeprecationWarning,
            )
    def register_signals(self) -> None:
        """Register signals that stop child processes"""
        # SIGINT/SIGTERM trigger a graceful shutdown; SIGUSR2 dumps executor
        # state for debugging without stopping the scheduler.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
        signal.signal(signal.SIGUSR2, self._debug_dump)
    def _exit_gracefully(self, signum, frame) -> None:
        """Helper method to clean up processor_agent to avoid leaving orphan processes."""
        if not _is_parent_process():
            # Only the parent process should perform the cleanup.
            return
        self.log.info("Exiting gracefully upon receiving signal %s", signum)
        if self.processor_agent:
            self.processor_agent.end()
        # Exit with the conventional "success" status code.
        sys.exit(os.EX_OK)
    def _debug_dump(self, signum, frame):
        """Log the executor's internal state; installed as the SIGUSR2 handler."""
        if not _is_parent_process():
            # Only the parent process should perform the debug dump.
            return
        try:
            sig_name = signal.Signals(signum).name
        except Exception:
            # Fall back to the raw number if the signal has no symbolic name.
            sig_name = str(signum)
        self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
        self.executor.debug_dump()
        self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of runni |
tadgh/ArgoRevisit | third_party/nltk/parse/nonprojectivedependencyparser.py | Python | apache-2.0 | 27,018 | 0.00533 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
import math
from nltk.grammar import parse_dependency_grammar
from dependencygraph import *
#################################################################
# DependencyScorerI - Interface for Graph-Edge Weight Calculation
#################################################################
class DependencyScorerI(object):
    """
    A scorer for calculated the weights on the edges of a weighted
    dependency graph.  This is used by a
    C{ProbabilisticNonprojectiveParser} to initialize the edge
    weights of a C{DependencyGraph}.  While typically this would be done
    by training a binary classifier, any class that can return a
    multidimensional list representation of the edge weights can
    implement this interface.  As such, it has no necessary
    fields.
    """

    def __init__(self):
        # Refuse direct instantiation of the interface itself; subclasses
        # pass through and may extend this constructor.
        if self.__class__ == DependencyScorerI:
            raise TypeError('DependencyScorerI is an abstract interface')

    def train(self, graphs):
        """
        @type graphs: A list of C{DependencyGraph}
        @param graphs: A list of dependency graphs to train the scorer.
        Typically the edges present in the graphs can be used as
        positive training examples, and the edges not present as negative
        examples.
        """
        raise AssertionError('DependencyScorerI is an abstract interface')

    def score(self, graph):
        """
        @type graph: A C{DependencyGraph}
        @param graph: A dependency graph whose set of edges need to be
        scored.
        @rtype: A three-dimensional list of numbers.
        @return: The score is returned in a multidimensional(3) list, such
        that the outer-dimension refers to the head, and the
        inner-dimension refers to the dependencies.  For instance,
        scores[0][1] would reference the list of scores corresponding to
        arcs from node 0 to node 1.  The node's 'address' field can be used
        to determine its number identification.
        For further illustration, a score list corresponding to Fig.2 of
        Keith Hall's 'K-best Spanning Tree Parsing' paper:
              scores = [[[], [5],  [1],  [1]],
                       [[], [],   [11], [4]],
                       [[], [10], [],   [5]],
                       [[], [8],  [8],  []]]
        When used in conjunction with a MaxEntClassifier, each score would
        correspond to the confidence of a particular edge being classified
        with the positive training examples.
        """
        raise AssertionError('DependencyScorerI is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Comparisons
    #////////////////////////////////////////////////////////////

    def __cmp__(self, other):
        raise AssertionError('DependencyScorerI is an abstract interface')

    def __hash__(self):
        # Bug fix: the original signature was ``__hash__(self, other)``,
        # so ``hash(obj)`` raised TypeError (missing argument) instead of
        # the intended AssertionError.  ``__hash__`` takes no extra args.
        raise AssertionError('DependencyScorerI is an abstract interface')
#################################################################
# NaiveBayesDependencyScorer
#################################################################
class NaiveBayesDependencyScorer(DependencyScorerI):
"""
A dependency scorer built around a MaxEnt classifier. In this
particular class that classifier is a C{NaiveBayesClassifier}.
It uses head-word, head-tag, child-word, and child-tag features
for classification.
"""
    def __init__(self):
        # NOTE(review): bare Python 2 ``print`` statement -- this merely emits
        # an empty line; it appears to be a placeholder so the class can be
        # constructed without arguments (classifier is set later by train()).
        print # Do nothing without throwing error?
    def train(self, graphs):
        """
        Trains a C{NaiveBayesClassifier} using the edges present in
        graphs list as positive examples, the edges not present as
        negative examples. Uses a feature vector of head-word,
        head-tag, child-word, and child-tag.
        @type graphs: A list of C{DependencyGraph}
        @param graphs: A list of dependency graphs to train the scorer.
        """
        # Create training labeled training examples
        labeled_examples = []
        for graph in graphs:
            for head_node in graph.nodelist:
                for child_index in range(len(graph.nodelist)):
                    child_node = graph.get_by_address(child_index)
                    # An edge existing in the graph is a positive ("T")
                    # example; a missing edge is a negative ("F") one.
                    if child_index in head_node['deps']:
                        label = "T"
                    else:
                        label = "F"
                    # NOTE(review): ``features`` is assigned but never used;
                    # the dict built on the next line duplicates it.
                    features = [head_node['word'], head_node['tag'], child_node['word'], child_node['tag']]
                    labeled_examples.append((dict(a=head_node['word'],b=head_node['tag'],c=child_node['word'],d=child_node['tag']), label))
        # Train the classifier
        import nltk
        # NOTE(review): nltk.usage() prints the ClassifierI interface; it has
        # no effect on training -- presumably left in for demonstration.
        nltk.usage(nltk.ClassifierI)
        self.classifier = nltk.classify.NaiveBayesClassifier.train(labeled_examples)
def score(self, graph):
"""
Converts the graph into a feature-based representation of
each edge, and then assigns a score to each based on the
confidence of the classifier in assigning it to the
positive label. Scores are returned in a multidimensional list.
@type graph: C{DependencyGraph}
@param graph: A dependency graph to score.
@rtype: 3 dimensional list
@return: Edge scores for the graph parameter.
"""
# Convert graph to feature representation
edges = []
for i in range(len(graph.nodelist)):
for j in range(len(graph.nodelist)):
head_node = graph.get_by_address(i)
child_node = graph.get_by_address(j)
print head_node
print child_node
edges.append((dict(a=head_node['word'],b=head_node['tag'],c=child_node['word'],d=child_node['tag'])))
# Score edges
edge_scores = []
row = []
count = 0
for pdist in self.classifier.batch_prob_classify(edges):
print '%.4f %.4f' % (pdist.prob('T'), pdist.prob('F'))
| row.append([math.log(pdist.prob("T"))])
count += 1
if count == len(graph.nodelist):
edge_scores.append(row)
row = []
count = 0
return edge_scores
#################################################################
# A Scorer for Demo Purposes
######################################################## | #########
# A short class necessary to show parsing example from paper
class DemoScorer:
    """Scorer with hard-coded edge weights, used only for demonstration."""
    def train(self, graphs):
        # Nothing to learn -- the weights below are fixed (Python 2 print).
        print 'Training...'
    def score(self, graph):
        # scores for Keith Hall 'K-best Spanning Tree Parsing' paper
        return [[[], [5], [1], [1]],
                [[], [], [11], [4]],
                [[], [10], [], [5]],
                [[], [8], [8], []]]
#################################################################
# Non-Projective Probabilistic Parsing
#################################################################
class ProbabilisticNonprojectiveParser(object):
"""
A probabilistic non-projective dependency parser. Nonprojective
dependencies allows for "crossing branches" in the parse tree
which is necessary for representing particular linguistic
phenomena, or even typical parses in some languages. This parser
follows the MST parsing algorithm, outlined in McDonald(2005),
which likens the search for the best non-projective parse to
finding the maximum spanning tree in a weighted directed graph.
"""
    def __init__(self):
        """
        Creates a new non-projective parser.
        """
        # No state is initialised here; this only announces construction
        # (Python 2 print statement).
        print 'initializing prob. nonprojective...'
def train(self, graphs, dependency_scorer):
"""
Trains a C{DependencyScorerI} from a set of C{DependencyGraph} objects,
and establishes this as the parser's scorer. This is used to
initialize the scores on a C{DependencyGraph} during the parsing
procedure.
@type graphs: A list |
jonasprobst/wandering-pillar-cli | firebase-test.py | Python | mit | 292 | 0.003425 | from firebase import | firebase
firebase = firebase.FirebaseApplication('https://wapi.firebaseio.com', None)
new_user = 'Ozgur Vatansever'
result = firebase.post('/users', new_user, name=None, connection=None, params={'print': 'pretty'}, headers={'X_FANCY_HEADER': 'VERY FANC | Y'})
print result
|
liboyin/algo-prac | arrays/longest_increasing_subsequence.py | Python | gpl-3.0 | 3,290 | 0.006079 | from bisect import bisect_left
from lib import argmax, bin_search_left, yield_while
from math import inf
def search(arr): # binary search, length only. O(n\log n) time
    """Return the length of the longest strictly increasing subsequence of ``arr``.

    Patience-sorting approach: ``st[i]`` holds the smallest possible tail of
    an increasing subsequence of length ``i + 1``, so ``st`` stays sorted and
    its final length is the answer.  O(n log n) time, O(n) space.

    Robustness fix: the original crashed with IndexError on an empty input
    (it read ``arr[0]`` unconditionally); an empty sequence now returns 0.
    """
    if not arr:
        return 0
    st = [arr[0]] # st[i]: smallest tail of LIS of length i + 1. naturally sorted, and all elements are distinct
    for x in arr[1:]: # arr[0] is already the sole tail; its revisit was a no-op anyway
        if x > st[-1]: # if x is greater than the current smallest tail, then no need to search
            st.append(x)
        else:
            st[bisect_left(st, x)] = x # returns the index of x if in st, or the index of the smallest element larger than x
    return len(st)
def search2(arr): # binary search with reconstruction. O(n\log n) time, O(n) space
    """Return the indices (into ``arr``) of one longest strictly increasing
    subsequence, via patience sorting plus back-pointers.

    Assumes ``arr`` is non-empty (``st`` is seeded with index 0).
    """
    st = [0] # st[i]: index (in arr) of the smallest tail of the LIS of length i + 1
    bt = [-1] # bt[i]: index (in arr) of the predecessor of arr[i] in the LIS so far, or -1 if arr[i] is the head. when finished, len(bt) == len(arr)
    for i, x in enumerate(arr[1:], start=1):
        if x > arr[st[-1]]: # x is greater than the current smallest tail
            bt.append(st[-1]) # point to the previous element of the current tail of st
            st.append(i)
        else:
            # Replace the first tail >= x; st stays sorted by arr value.
            pos = bin_search_left(st, x, key=lambda j: arr[j])
            assert pos < len(st)
            bt.append(st[pos - 1] if pos > 0 else -1) # pos == 0 -> arr[i] is the new head
            st[pos] = i
    # Follow back-pointers from the tail of the longest pile, then reverse.
    return list(yield_while(st[-1], lambda x: x >= 0, lambda x: bt[x]))[::-1] # indices only
def search3(arr): # DP with reconstruction. O(n^2) time, O(n) space
    """Return the indices (into ``arr``) of one longest strictly increasing
    subsequence, using quadratic dynamic programming.

    Assumes ``arr`` is non-empty (``dp``/``bt`` are seeded for arr[0]).
    """
    dp = [1] # dp[i]: maximum length of increasing subsequence with arr[i] as tail
    bt = [-1] # bt[i]: index (in arr) of the largest possible predecessor of arr[i], or -1 if arr[i] is the head
    for i, x in enumerate(arr[1:], start=1):
        m = -1 # m: in search for bt[i]
        for j in range(i):
            if arr[j] < x and (m == -1 or dp[j] > dp[m]):
                # among all j < i s.t. arr[j] < arr[i], maximise dp[j]. if multiple such j exist, take the first one
                m = j
        if m == -1: # arr[i] as the start of dp new increasing subsequence
            dp.append(1)
            bt.append(-1)
        else:
            dp.append(dp[m] + 1)
            bt.append(m)
    # Walk back-pointers from the index with the best dp value, then reverse.
    return list(yield_while(argmax(dp), lambda s: s >= 0, lambda s: bt[s]))[::-1] # indices only
def search_triple(arr): # returns whether a triple i < j < k exists s.t. arr[i] < arr[j] < arr[k]
    """Return True iff ``arr`` has indices i < j < k with arr[i] < arr[j] < arr[k].

    Single pass, O(1) space: ``fst`` tracks the smallest value seen so far,
    ``snd`` the smallest tail of any increasing pair; any later value greater
    than ``snd`` completes an increasing triple.

    Bug fix: the original ``fst, snd = inf`` raised TypeError at runtime
    (a single float cannot be unpacked into two names); both sentinels must
    start at +infinity.
    """
    fst = snd = inf
    for x in arr:
        if x < fst:
            fst = x
        elif fst < x < snd:
            snd = x
        elif x > snd:
            return True
    return False
if __name__ == '__main__':
from random import shuffle
std_test = {(0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15): (0, 4, 6, 9, 13, 15),
(2, 3, 1, 4, 0, 4, 0, 3, 1, 4, 0): (6, 8, 9)}
for k, v in std_test.items():
assert search(k) == len(search2(k)) == len(v)
| for _ in range(100):
rnd_test = list(range(50 | )) * 4
shuffle(rnd_test)
n = search(rnd_test)
bs = search2(rnd_test)
dp = search3(rnd_test)
assert n == len(bs) == len(dp)
for i in range(n - 1):
assert rnd_test[bs[i]] < rnd_test[bs[i + 1]]
assert rnd_test[dp[i]] < rnd_test[dp[i + 1]]
|
tommyip/zulip | zerver/tests/test_push_notifications.py | Python | apache-2.0 | 82,169 | 0.002312 | from contextlib import contextmanager
import datetime
import itertools
import requests
import mock
from mock import call
from typing import Any, Dict, List, Optional
import base64
import os
import ujson
import uuid
from django.test import override_settings
from django.conf import settings
from django.http import HttpResponse
from django.db import transaction
from django.db.models import F
from django.utils.crypto import get_random_string
from django.utils.timezone import utc as timezone_utc
from analytics.lib.counts import CountStat, LoggingCountStat
from analytics.models import InstallationCount, RealmCount
from zerver.models import (
PushDeviceToken,
Message,
UserMessage,
receives_offline_email_notifications,
receives_offline_push_notifications,
receives_online_notifications,
receives_stream_notifications,
get_client,
get_realm,
get_stream,
Recipient,
Stream,
Subscription,
)
from zerver.lib.actions import do_delete_messages, do_mark_stream_messages_as_read
from zerver.lib.soft_deactivation import do_soft_deactivate_users
from zerver.lib.push_n | otifications import (
absolute_avatar_url,
b64_to_hex,
datetime_to_timestamp,
DeviceToken,
get_apns_client,
get_display_recipient,
get_message_payload_apns,
get_message_payload_gcm,
get_mobile_push_content,
handle_push_notification,
handle_remove_push_notification,
hex_to_b64,
modernize_apns_payload,
num_push_devices_for_user,
parse_gcm_options,
send_android_push_notification_to_user,
send_apple_push | _notification,
send_notifications_to_bouncer,
send_to_push_bouncer,
)
from zerver.lib.remote_server import send_analytics_to_remote_server, \
build_analytics_data, PushNotificationBouncerException
from zerver.lib.request import JsonableError
from zerver.lib.test_classes import (
TestCase, ZulipTestCase,
)
from zilencer.models import RemoteZulipServer, RemotePushDeviceToken, \
RemoteRealmCount, RemoteInstallationCount
from django.utils.timezone import now
ZERVER_DIR = os.path.dirname(os.path.dirname(__file__))
class BouncerTestCase(ZulipTestCase):
    """Base class for push-notification-bouncer tests.

    Registers a RemoteZulipServer fixture per test and routes
    python-requests-style calls through Django's test client.
    """
    def setUp(self) -> None:
        # Fixture remote server; torn down by uuid in tearDown().
        self.server_uuid = "1234-abcd"
        server = RemoteZulipServer(uuid=self.server_uuid,
                                   api_key="magic_secret_api_key",
                                   hostname="demo.example.com",
                                   last_updated=now())
        server.save()
        super().setUp()
    def tearDown(self) -> None:
        RemoteZulipServer.objects.filter(uuid=self.server_uuid).delete()
        super().tearDown()
    def bounce_request(self, *args: Any, **kwargs: Any) -> HttpResponse:
        """This method is used to carry out the push notification bouncer
        requests using the Django test browser, rather than python-requests.
        """
        # args[0] is method, args[1] is URL.
        local_url = args[1].replace(settings.PUSH_NOTIFICATION_BOUNCER_URL, "")
        # NOTE(review): assumes callers always pass a ``data`` kwarg; a call
        # without it raises KeyError here.
        if args[0] == "POST":
            result = self.api_post(self.server_uuid,
                                   local_url,
                                   kwargs['data'],
                                   subdomain="")
        elif args[0] == "GET":
            result = self.api_get(self.server_uuid,
                                  local_url,
                                  kwargs['data'],
                                  subdomain="")
        else:
            raise AssertionError("Unsupported method for bounce_request")
        return result
    def get_generic_payload(self, method: str='register') -> Dict[str, Any]:
        # Canned register/unregister payload for tests.
        # NOTE(review): the ``method`` parameter is currently unused.
        user_id = 10
        token = "111222"
        token_kind = PushDeviceToken.GCM
        return {'user_id': user_id,
                'token': token,
                'token_kind': token_kind}
class PushBouncerNotificationTest(BouncerTestCase):
DEFAULT_SUBDOMAIN = ""
    def test_unregister_remote_push_user_params(self) -> None:
        """Unregister endpoint rejects missing params and non-server auth."""
        token = "111222"
        token_kind = PushDeviceToken.GCM
        endpoint = '/api/v1/remotes/push/unregister'
        # Each required argument missing in turn.
        result = self.api_post(self.server_uuid, endpoint, {'token_kind': token_kind})
        self.assert_json_error(result, "Missing 'token' argument")
        result = self.api_post(self.server_uuid, endpoint, {'token': token})
        self.assert_json_error(result, "Missing 'token_kind' argument")
        # We need the root ('') subdomain to be in use for this next
        # test, since the push bouncer API is only available there:
        realm = get_realm("zulip")
        realm.string_id = ""
        realm.save()
        # Authenticating as a regular user (not a remote server) must fail.
        result = self.api_post(self.example_email("hamlet"), endpoint, {'token': token,
                                                                        'user_id': 15,
                                                                        'token_kind': token_kind},
                               subdomain="")
        self.assert_json_error(result, "Must validate with valid Zulip server API key")
def test_register_remote_push_user_paramas(self) -> None:
token = "111222"
user_id = 11
token_kind = PushDeviceToken.GCM
endpoint = '/api/v1/remotes/push/register'
result = self.api_post(self.server_uuid, endpoint, {'user_id': user_id, 'token_kind': token_kind})
self.assert_json_error(result, "Missing 'token' argument")
result = self.api_post(self.server_uuid, endpoint, {'user_id': user_id, 'token': token})
self.assert_json_error(result, "Missing 'token_kind' argument")
result = self.api_post(self.server_uuid, endpoint, {'token': token, 'token_kind': token_kind})
self.assert_json_error(result, "Missing 'user_id' argument")
result = self.api_post(self.server_uuid, endpoint, {'user_id': user_id, 'token': token, 'token_kind': 17})
self.assert_json_error(result, "Invalid token type")
result = self.api_post(self.example_email("hamlet"), endpoint, {'user_id': user_id,
'token_kind': token_kind,
'token': token})
self.assert_json_error(result, "Account is not associated with this subdomain",
status_code=401)
# We need the root ('') subdomain to be in use for this next
# test, since the push bouncer API is only available there:
realm = get_realm("zulip")
realm.string_id = ""
realm.save()
result = self.api_post(self.example_email("hamlet"), endpoint, {'user_id': user_id,
'token_kind': token_kind,
'token': token})
self.assert_json_error(result, "Must validate with valid Zulip server API key")
result = self.api_post(self.server_uuid, endpoint, {'user_id': user_id,
'token_kind': token_kind,
'token': token},
subdomain="zulip")
self.assert_json_error(result, "Invalid subdomain for push notifications bouncer",
status_code=401)
# We do a bit of hackery here to the API_KEYS cache just to
# make the code simple for sending an incorrect API key.
from zerver.lib.test_classes import API_KEYS
API_KEYS[self.server_uuid] = 'invalid'
result = self.api_post(self.server_uuid, endpoint, {'user_id': user_id,
'token_kind': token_kind,
'token': token})
self.assert_json_error(result, "Zulip server auth failure: key does not match role 1234-abcd",
status_code=401)
del API_KEYS[self.server_uuid]
credentials = "%s:%s" % ("5678-efgh", 'invalid')
api_auth = 'Basic ' + base64.b64encode(credentials.encode |
zoowii/pia-cloud | cloudweb/app/db.py | Python | mit | 85 | 0.011765 | from core import db
fro | m auth import models
from admin import models
db.create_al | l() |
plang85/rough_surfaces | examples/example_contact.py | Python | mit | 1,319 | 0.001516 | import numpy as np
import rough_surfaces.params as rp
import rough_surfaces.generate as rg
import rough_surfaces.analyse as ra
import rough_surfaces.surface as rs
import rough_surfaces.contact as rc
import matplotlib.pyplot as plt
from matplotlib import rcParams
import rough_surfaces.plot as rplt
# TODO dont do this
rcParams['savefig.dpi'] = 300
rcParams['legend.loc'] = 'upper right'
rcParams['image.cmap'] = 'hot'
N_power_of_two = 9
surface_params = rp.SelfAffineParameters()
surface = rg.make_self_affine(surface_params, N_power_of_two | , seed=0)
E, nu = 1.0E+9, 0.3
dxy = 1.0E-3
nominal_stress = 1.0E7
contact = rc.contact_FFT(surface, nominal_stress, E, nu, verbose=2, err_lim=1.0E-8)
if 1:
fig, ax = plt.subplots()
rplt.traces(ax, surface, [contact.u], 128)
unit_den = '(m)'
ax.set_xlabel('x ' + unit_den)
ax.set_ylabel('y ' + unit_den)
plt.legend()
plt.show()
if 1:
fig, ax = plt.subplots()
N = surface.shape[0]
L = rs.length(surface)
x = np.linspace(-L/2., L/2., N)
XX, YY = np.meshgrid(x | , x)
pressure_plot = ax.pcolor(XX, YY, contact.p)
ax.axis('equal')
unit_den = '(m)'
ax.set_xlabel('x ' + unit_den)
ax.set_ylabel('y ' + unit_den)
cbar = plt.colorbar(pressure_plot)
cbar.set_label('Pressure (Pa)', rotation=270)
plt.show()
|
Jacobious52/PythonLab | osxpasscrack.py | Python | gpl-2.0 | 886 | 0.029345 | import itertools
import subprocess
import sys
#http://pastebin.com/zj72xk4N
#run when system password box is showing eg. keychain password dialog
#apple script for automating dialog box input
sys_script = '''
tell application "System Events" to tell process "SecurityAgent"
set value of text field 1 of window 1 to $(PASS)
click button 1 of group 1 of window 1
end tell
'''
#fill this array with chars for combination
keys = ['s','t','a','r','t']
def automate_ | login():
for l in xrange(0, len(keys)+1):
for subset in itertools.permutations(keys, l):
guess = ''.join(subset)
tmp = sys_script.replace('$(PASS)', '"%s"' % guess)
try:
subprocess.check_output('osascript -e \'%s\'' % tmp, shell=True)
sys.stdout.write('\rtrying %s ' % guess)
sys.stdout.flush()
except subprocess.CalledProcessError:
print('\nfailed')
return
retur | n
automate_login() |
noironetworks/group-based-policy | gbpservice/nfp/core/sequencer.py | Python | apache-2.0 | 4,161 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import six
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
deque = collections.deque
class SequencerEmpty(Exception):
pass
class Seq | uencerBusy(Exception):
pass
"""Sequences the events. """
class EventSequencer(object):
class Sequencer(object):
def __init__(self):
# Events not scheduled are queued
self._waitq = deque()
# Currently scheduled event
self._scheduled = None
def _is_busy(self):
if self._scheduled:
raise Sequen | cerBusy
def _is_empty(self):
if not len(self._waitq):
raise SequencerEmpty
def sequence(self, event):
self._waitq.append(event)
def run(self):
"""Run to get event to be scheduled.
If sequencer is busy - i.e, an event is already
scheduled and in progress raises busy except.
If sequencer is empty - i.e, no event in sequencer
raises empty except.
"""
self._is_busy()
self._is_empty()
# Pop the first element in the queue - FIFO
self._scheduled = self._waitq.popleft()
return self._scheduled
def is_scheduled(self, event):
if self._scheduled:
return self._scheduled.desc.uuid == event.desc.uuid and (
self._scheduled.id == event.id)
return True
def release(self):
self._scheduled = None
def pop(self):
self.release()
events = list(self._waitq)
self._waitq.clear()
return events
def __init__(self):
# Sequence of related events
# {key: sequencer()}
self._sequencer = {}
def sequence(self, key, event):
try:
self._sequencer[key].sequence(event)
except KeyError:
self._sequencer[key] = self.Sequencer()
self._sequencer[key].sequence(event)
message = "Sequenced event - %s" % (event.identify())
LOG.debug(message)
def run(self):
events = []
# Loop over copy and delete from original
sequencers = dict(self._sequencer)
for key, sequencer in six.iteritems(sequencers):
try:
event = sequencer.run()
if event:
message = "Desequenced event - %s" % (
event.identify())
LOG.debug(message)
event.sequence = False
events.append(event)
except SequencerBusy as exc:
pass
except SequencerEmpty as exc:
exc = exc
message = "Sequencer empty"
LOG.debug(message)
del self._sequencer[key]
return events
def pop(self):
events = []
sequencers = dict(self._sequencer)
for key, sequencer in six.iteritems(sequencers):
events += sequencer.pop()
return events
def release(self, key, event):
try:
message = "(event - %s) checking to release" % (event.identify())
LOG.debug(message)
if self._sequencer[key].is_scheduled(event):
message = "(event - %s) Releasing sequencer" % (
event.identify())
LOG.debug(message)
self._sequencer[key].release()
except KeyError:
return
|
mo-g/iris | lib/iris/tests/unit/fileformats/pp_rules/test__reshape_vector_args.py | Python | gpl-3.0 | 5,242 | 0 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
:func:`iris.fileformats.pp_rules._reshape_vector_args`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.fileformats.pp_rules import _reshape_vector_args
class TestEmpty(tests.IrisTest):
def test(self):
result = _reshape_vector_args([])
self.assertEqual(result, [])
class TestSingleArg(tests.IrisTest):
def _check(self, result, expected):
self.assertEqual(len(result), len(expected))
for result_arr, expected_arr in zip(result, expected):
self.assertArrayEqual(result_arr, expected_arr)
def test_nochange(self):
points = np.array([[1, 2, 3], [4, 5, 6]])
result = _reshape_vector_args([(points, (0, 1))])
expected = [points]
self._check(result, expected)
def test_bad_dimensions(self):
points = np.array([[1, 2, 3], [4, 5, 6]])
with self.assertRaisesRegexp(ValueError, 'Length'):
_reshape_vector_args([(points, (0, 1, 2))])
def test_scalar(self):
points = 5
result = _reshape_vector_args([(points, ())])
expected = [points]
self._check(result, expected)
def test_nonarray(self):
points = [[1, 2, 3], [4, 5, 6]]
result = _reshape_vector_args([(points, (0, 1))])
e | xpected = [np.array(points)]
self._check(result, expected)
def test_transpose(self):
points = np.array([[1, 2, 3], [4, 5, 6]])
result = _reshape_vector_args([(points, (1, 0))])
expected = [points.T]
self._check(result, expected)
def test_extend(self):
points = np.array([[1, 2, 3, 4], [21, 22, 23, 24], [31, 32, 33, 34]])
| result = _reshape_vector_args([(points, (1, 3))])
expected = [points.reshape(1, 3, 1, 4)]
self._check(result, expected)
class TestMultipleArgs(tests.IrisTest):
def _check(self, result, expected):
self.assertEqual(len(result), len(expected))
for result_arr, expected_arr in zip(result, expected):
self.assertArrayEqual(result_arr, expected_arr)
def test_nochange(self):
a1 = np.array([[1, 2, 3], [4, 5, 6]])
a2 = np.array([[0, 2, 4], [7, 8, 9]])
result = _reshape_vector_args([(a1, (0, 1)), (a2, (0, 1))])
expected = [a1, a2]
self._check(result, expected)
def test_array_and_scalar(self):
a1 = [[1, 2, 3], [3, 4, 5]]
a2 = 5
result = _reshape_vector_args([(a1, (0, 1)), (a2, ())])
expected = [a1, np.array([[5]])]
self._check(result, expected)
def test_transpose(self):
a1 = np.array([[1, 2, 3], [4, 5, 6]])
a2 = np.array([[0, 2, 4], [7, 8, 9]])
result = _reshape_vector_args([(a1, (0, 1)), (a2, (1, 0))])
expected = [a1, a2.T]
self._check(result, expected)
def test_incompatible(self):
# Does not enforce compatibility of results.
a1 = np.array([1, 2])
a2 = np.array([1, 2, 3])
result = _reshape_vector_args([(a1, (0,)), (a2, (0,))])
expected = [a1, a2]
self._check(result, expected)
def test_extend(self):
a1 = np.array([[1, 2, 3], [4, 5, 6]])
a2 = np.array([11, 12, 13])
result = _reshape_vector_args([(a1, (0, 1)), (a2, (1,))])
expected = [a1, a2.reshape(1, 3)]
self._check(result, expected)
def test_extend_transpose(self):
a1 = np.array([[1, 2, 3], [4, 5, 6]])
a2 = np.array([11, 12, 13])
result = _reshape_vector_args([(a1, (1, 0)), (a2, (1,))])
expected = [a1.T, a2.reshape(1, 3)]
self._check(result, expected)
def test_double_extend(self):
a1 = np.array([[1, 2, 3], [4, 5, 6]])
a2 = np.array(1)
result = _reshape_vector_args([(a1, (0, 2)), (a2, ())])
expected = [a1.reshape(2, 1, 3), a2.reshape(1, 1, 1)]
self._check(result, expected)
def test_triple(self):
a1 = np.array([[1, 2, 3, 4]])
a2 = np.array([3, 4])
a3 = np.array(7)
result = _reshape_vector_args([(a1, (0, 2)), (a2, (1,)), (a3, ())])
expected = [a1.reshape(1, 1, 4),
a2.reshape(1, 2, 1),
a3.reshape(1, 1, 1)]
self._check(result, expected)
if __name__ == "__main__":
tests.main()
|
jgoodell/chess-algebra | tests.py | Python | apache-2.0 | 1,483 | 0.002697 | from unittest import TestCase
from chess import get_potential_moves
class ChessTestCase(TestCase):
def setup(self):
pass
def teardown(self):
pass
def test_knight(self):
response = get_potential_moves('knight', 'd2')
response = [each.strip() for each in response.split(',')]
possible_moves = ['b1', 'f1', 'b3', 'f3', 'c4', 'e4']
self.assertEqual(len(response), len(possible_moves))
for each in possible_moves:
self.assertTrue(each in response)
def test_rook(self):
response = get_potential_moves('rook', 'd5')
response = [each.strip() for each in response.split(',')]
possible_moves = ['a5', 'b5', 'c5', 'e5', 'f5', 'g5', 'h5',
'd1', 'd2', 'd3', 'd4', 'd6', 'd7', 'd8']
self.assertEqual(len(res | ponse), len(possible_moves))
for each in possible_moves:
self.assertTrue(each in response)
def test_queen(self):
response = get_potential_moves | ('queen', 'd4')
response = [each.strip() for each in response.split(',')]
possible_moves = ['a4', 'b4', 'c4', 'e4', 'f4', 'g4', 'h4',
'd1', 'd2', 'd3', 'd5', 'd6', 'd7', 'd8',
'a7', 'b6', 'c5', 'e3', 'f2', 'g1',
'a1', 'b2', 'c3', 'e5', 'f6', 'g7', 'h8']
for each in possible_moves:
self.assertTrue(each in response)
|
lol/BCI-BO-old | plot_iv2a.py | Python | gpl-3.0 | 4,270 | 0.016862 | import numpy as np
import matplotlib.pyplot as plt
import math
from pylab import figure
from my_plotter import *
import os
import sys
sys.path.append('./BCI_Framework')
import Main
import Single_Job_runner as SJR
import os
import re
if __name__ == '__main__':
bciciv1 | = Main.Main('BCI_Framework','BCICIV2a','RANDOM_FOREST', 'BP', 'ALL', -1, 'python')
res_path = bciciv1.config.configuration['resu | lts_opt_path_str']
classifiers_dict = {'Boosting':0, 'LogisticRegression_l1':1, 'LogisticRegression_l2':2, 'RANDOM_FOREST':3,'SVM_linear':4, 'SVM_rbf':5 }
features_dict = {'BP':0, 'logbp':1, 'wackerman':2,'BPCSP':3, 'logbpCSP':4, 'wackermanCSP':5}
results = np.zeros((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]))
discarded_periods = np.empty((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]), dtype='S10')
subjects_dict = {}
for ind, subj in enumerate(bciciv1.config.configuration["subject_names_str"]):
subjects_dict.update({subj:ind})
for dirname, dirnames, filenames in os.walk(res_path):
# for subdirname in dirnames:
# fold_name = os.path.join(dirname, subdirname)
# print fold_name
for filename in filenames:
# slash_indices = re.search('0', filename)
file_name = os.path.join(dirname, filename)
backslash_indices = [m.start() for m in re.finditer("\\\\", file_name)]
underline_indices = [m.start() for m in re.finditer("_", file_name)]
feature_ext_name = file_name[backslash_indices[-2]+1:backslash_indices[-1]]
classifier_name = file_name[backslash_indices[-3]+1:backslash_indices[-2]]
subj = file_name[underline_indices[-1]+1:]
# print feature_ext_name, classifier_name, subj
with open(file_name,'r') as my_file:
error = float(my_file.readline())
accuracy = 100 - error*100
results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
# print file_name[backslash_indices[-1]+1:underline_indices[1]]
discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
# print backslash_indices
for feature in features_dict.keys():
f_ind = features_dict[feature]
feature_ext_y = []
labels = []
for subject in subjects_dict.keys():
subj_ind = subjects_dict[subject]
feature_ext_y.append(tuple(results[:,f_ind,subj_ind]))
labels.append(feature + '_' + subject)
# plotter( feature_ext_y, math.floor(np.min(feature_ext_y) - 1), math.floor(np.max(feature_ext_y) + 1), feature, labels)
plotter( feature_ext_y, 15, 87, feature, labels)
for subject in subjects_dict.keys():
for feature in features_dict.keys():
print subject, feature, discarded_periods[:, features_dict[feature],subjects_dict[subject]]
# BP_y = [(72.96,78.62,78.62,76.11,79.25,79.88), (64.45,65.38,65.75,65.00,67.04,66.67), (69.45,71.86,74.26,72.04,69.75,72.6)]
# labels = ['BP_O3','BP_S4','BP_X11']
# plotter( BP_y, 64, 81, 'BP', labels)
# logBP_y = [(74.22,79.25,79.25,77.36,81.77,81.77), (62.23,66.49,66.30,65.38,66.86,66.86), (69.82,72.97,73.15,71.86,74.63,74.63)]
# labels = ['LOGBP_O3','LOGBP_S4','LOGBP_X11']
# plotter( logBP_y, 61, 84, 'logBP', labels)
# wackermann_y = [(56.61,57.24,58.24,54.72,54.72,59.75), (57.97,57.6,59.82,55.75,57.97,58.71), (60,50,57.24,61.49,60.56,62.23)]
# labels = ['wackerman_O3','wackerman_S4','wackerman_X11']
# plotter( wackermann_y, 49, 65, 'wackerman', labels)
# y_RF = [(77.98,76.72,76.72,79.87), (70.74,74.44,80.92,75.18),(75.92,73.51,77.03,78.33),(76.11,77.36,58.5, 54.72), (65,65.38,53.34,55.75), (72.04,71.86,60,61.49)]
# labels = ['BO_RF_O3','BO_RF_S4','BO_RF_X11','RF_grid_search_O3','RF_grid_search_S4','RF_grid_search_X11']
# BO_plotter( y_RF, 49, 83, 'BO_RF', labels)
plt.show() |
fredwulei/fredsneverland | fredsneverland/timeline/urls.py | Python | mit | 130 | 0.007692 | from django.conf.urls import url
from timeline import views
urlpatterns = [
url(r'^$', views.timelines, name='timelines'), |
] | |
naveensan1/nuage-openstack-neutron | nuage_neutron/plugins/nuage_ml2/securitygroup.py | Python | apache-2.0 | 12,033 | 0 | # Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron._i18n import _
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db.common_db_mixin import CommonDbMixin
from neutron.extensions import securitygroup as ext_sg
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import constants
from nuage_neutron.plugins.common import exceptions as nuage_exc
from nuage_neutron.plugins.common.time_tracker import TimeTracker
from nuage_neutron.plugins.common import utils as nuage_utils
from nuage_neutron.vsdclient.common import cms_id_helper
from nuage_neutron.vsdclient import restproxy
LOG = logging.getLogger(__name__)
class NuageSecurityGroup(base_plugin.BaseNuagePlugin,
CommonDbMixin):
def __init__(self):
super(NuageSecurityGroup, self).__init__()
self._l2_plugin = None
@property
def core_plugin(self):
if self._l2_plugin is None:
self._l2_plugin = directory.get_plugin()
return self._l2_plugin
def register(self):
self.nuage_callbacks.subscribe(self.post_port_create,
resources.PORT, constants.AFTER_CREATE)
self.nuage_callbacks.subscribe(self.post_port_update,
resources.PORT, constants.AFTER_UPDATE)
self.nuage_callbacks.subscribe(self.post_port_delete,
resources.PORT, constants.AFTER_DELETE)
registry.subscribe(self.pre_delete_security_group,
resources.SECURITY_GROUP,
events.BEFORE_DELETE)
registry.subscribe(self.pre_create_security_group_rule,
resources.SECURITY_GROUP_RULE,
events.BEFORE_CREATE)
registry.subscribe(self.post_create_security_group_rule,
resources.SECURITY_GROUP_RULE,
events.AFTER_CREATE)
registry.subscribe(self.pre_delete_security_group_rule,
resources.SECURITY_GROUP_RULE,
events.BEFORE_DELETE)
@nuage_utils.handle_nuage_api_error
@log_helpers.log_method_call
@TimeTracker.tracked
def pre_delete_security_group(self, resource, event, trigger, **kwargs):
self.vsdclient.delete_nuage_secgroup(kwargs['security_group_id'])
@nuage_utils.handle_nuage_api_error
@log_helpers.log_method_call
@TimeTracker.tracked
def pre_create_security_group_rule(self, resource, event, trigger,
**kwargs):
self.vsdclient.validate_nuage_sg_rule_definition(
kwargs['security_group_rule'])
@nuage_utils.handle_nuage_api_error
@log_helpers.log_method_call
@TimeTracker.tracked
def post_create_security_group_rule(self, resource, event, trigger,
**kwargs):
remote_sg = None
context = kwargs['context']
sg_rule = kwargs['security_group_rule']
sg_id = sg_rule['security_group_id']
if sg_rule.get('remote_group_id'):
remote_sg = self.core_plugin.get_security_group(
context, sg_rule.get('remote_group_id'))
try:
nuage_policygroup = self.vsdclient.get_sg_policygroup_mapping(
sg_id)
if nuage_policygroup:
sg_params = {
'sg_id': sg_id,
'neutron_sg_rule': sg_rule,
'policygroup': nuage_policygroup
}
if remote_sg:
sg_params['remote_group_name'] = remote_sg['name']
self.vsdclient.create_nuage_sgrule(sg_params)
except Exception:
with excutils.save_and_reraise_exception():
self.core_plugin.delete_security_group_rule(context,
sg_rule['id'])
@nuage_utils.handle_nuage_api_error
@log_helpers.log_method_call
@TimeTracker.tracked
def pre_delete_security_group_rule(self, resource, event, trigger,
**kwargs):
context = kwargs['context']
id = kwargs['security_group_rule_id']
local_sg_rule = self.core_plugin.get_security_group_rule(context, id)
self.vsdclient.delete_nuage_sgrule([local_sg_rule])
@TimeTracker.tracked
def post_port_create(self, resource, event, trigger, **kwargs):
context = kwargs['context']
port = kwargs['port']
subnet_mapping = kwargs['subnet_mapping']
if subnet_mapping['nuage_managed_subnet']:
return
vsd_subnet = self.vsdclient.get_nuage_subnet_by_id(subnet_mapping)
if port[ext_sg.SECURITYGROUPS]:
self._process_port_security_group(context,
port,
kwargs['vport'],
port[ext_sg.SECURITYGROUPS],
vsd_subnet)
@TimeTracker.tracked
def post_port_update(self, resource, event, trigger, **kwargs):
update_sg = True
context = kwargs['context']
updated_port = kwargs['updated_port']
original_port = kwargs['original_port']
rollbacks = kwargs['rollbacks']
subnet_mapping = kwargs['subnet_mapping']
if subnet_mapping['nuage_managed_subnet']:
return
new_sg = (set(updated_port.get(ext_sg.SECURITYGROUPS)) if
updated_port.get(ext_sg.SECURITYGROUPS) else set())
orig_sg = (set(original_port.get(ext_sg.SECURITYGROUPS)) if
original_port.get(ext_sg.SECURITYGROUPS) else set())
if not new_sg and new_sg == orig_sg:
update_sg = False
if update_sg:
vsd_subnet = self.vsdclient.get_nuage_subnet_by_id(subnet_mapping)
self._process_port_security_group(context,
updated_port,
kwargs['vport'],
new_sg,
vsd_subnet)
rollbacks.append((self._process_port_security_group,
[context, updated_port, kwargs['vport'],
original_port[ext_sg.SECURITYGROUPS],
vsd_subnet],
{}))
deleted_sg_ids = (set(original_port[ext_sg.SECURITYGROUPS]) -
set(updated_port[ext_sg.SECURITYGR | OUPS]))
self.vsdclient.check_unused_policygroups(deleted_sg_ids)
@TimeTracker.tracked
def post_port_delete(self, resource, event, trigger, **kwargs):
port = kwargs['port']
subnet_mapping = kwargs['subnet_mapping | ']
if subnet_mapping['nuage_managed_subnet']:
return
securitygroups = port.get(ext_sg.SECURITYGROUPS, [])
successful = False
attempt = 1
while not successful:
try:
self.vsdclient.check_unused_policygroups(securitygroups)
successful = True
except restproxy.RESTProxyError as e:
msg = e.msg.lower() |
Phy-David-Zhang/UnivMathSys | Technology/TopModuleInit/__init__.py | Python | gpl-3.0 | 329 | 0.009119 | # Initialization of All Modules of UnivMathSys
# Copyright (C) | 2016 Zhang Chang-kai #
# Contact via: phy.zhangck@gmail.com #
# General Public License ver | sion 3.0 #
'''Initialization of All Modules'''
from Foundation import *
from Elementary import *
from Structure import *
# End of Initialization of All Modules
|
elbeardmorez/quodlibet | quodlibet/gdist/__init__.py | Python | gpl-2.0 | 7,360 | 0 | # -*- coding: utf-8 -*-
# Copyright 2007 Joe Wreschnig
# 2012-2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""distutils extensions for GTK+/GObject/Unix
This module contains a Distribution subclass (GDistribution) which
implements build and install commands for operations related to
Python GTK+ and GObject support. This includes installation
of man pages and gettext/intltool support.
Also supports setuptools but needs to be imported after setuptools
(which does some monkey patching)
"""
import sys
from distutils.core import setup
from .shortcuts import build_shortcuts, install_shortcuts
from .man import install_man
from .po import build_mo, install_mo, po_stats, update_po, create_po
from .icons import install_icons
from .search_provider import install_search_provider
from .dbus_services import build_dbus_services, install_dbus_services
from .appdata import build_appdata, install_appdata
from .coverage import coverage_cmd
from .docs import build_sphinx
from .scripts import build_scripts
from .tests import quality_cmd, distcheck_cmd, test_cmd
from .clean import clean
from .zsh_completions import install_zsh_completions
from .util import get_dist_class, Distribution
distutils_build = get_dist_class("build")
class build(distutils_build):
"""Override the default build with new subcommands."""
sub_commands = distutils_build.sub_commands + [
("build_mo",
lambda self: self.distribution.has_po()),
("build_shortcuts",
lambda self: self.distribution.has_shortcuts()),
("build_dbus_services",
lambda self: self.distribution.has_dbus_services()),
("build_appdata",
lambda self: self.distribution.has_appdata()),
]
distutils_install = get_dist_class("install")
class install(distutils_install):
"""Override the default install with new subcommands."""
user_options = distutils_install.user_options + [
("mandir=", None, "destination directory for man pages. "
"Defaults to $PREFIX/share/man"),
]
sub_commands = distutils_install.sub_commands + [
("install_shortcuts", lambda self: self.distribution.has_shortcuts()),
("install_man", lambda self: self.distribution.has_man_pages()),
("install_mo", lambda self: self.distribution.has_po()),
("install_icons", lambda self: self.distribution.need_icon_install()),
("install_search_provider",
lambda self: self.distribution.need_search_provider()),
("install_dbus_services",
lambda self: self.distribution.has_dbus_services()),
("install_appdata",
lambda self: self.distribution.has_appdata()),
("install_zsh_completions",
lambda self: self.distribution.has_zsh_completions()),
]
def initialize_options(self):
distutils_install.initialize_options(self)
self.mandir = None
is_osx = (sys.platform == "darwin")
class GDistribution(Distribution):
"""A Distribution with support for GTK+-related options
The GDistribution class adds a number of commads and parameters
related to GTK+ and GObject Python programs and libraries.
Parameters (to distutils.core.setup):
po_directory -- directory where .po files are contained
po_package -- package name for translation files
shortcuts -- list of .desktop files to build/install
dbus_services -- list of .service files to build/install
man_pages -- list of man pages to install
appdata -- list of appdata files to install
Using the translation features requires intltool.
Example:
from distutils.core import setup
from gdist import GDistribution
setup(distclass=GDistribution, ...)
"""
shortcuts = []
appdata = []
dbus_services = []
po_directory = None
man_pages = []
po_package = None
search_provider = None
coverage_options = {}
zsh_completions = []
def __init__(self, *args, **kwargs):
Distribution.__init__(self, *args, **kwargs)
self.cmdclass.setdefault("build_mo", build_mo)
self.cmdclass.setdefault("build_shortcuts", build_shortcuts)
self.cmdclass.setdefault("build_dbus_services", build_dbus_services)
self.cmdclass.setdefault("build_appdata", build_appdata)
self.cmdclass.setdefault("build_scripts", build_scripts)
self.cmdclass.setdefault("install_icons", install_icons)
self.cmdcla | ss.setdefault("install_shortcuts", install_shortcuts)
self.cmdclass.setdefault("install_dbus_services",
install_dbus_services)
self.cmdclass.setdefault("install_man", install_man)
self.cmdclass.setdefault("install_mo", install_mo)
self.cmdclass.setdefault("install_search_provider",
install_s | earch_provider)
self.cmdclass.setdefault("install_appdata", install_appdata)
self.cmdclass.setdefault(
"install_zsh_completions", install_zsh_completions)
self.cmdclass.setdefault("build", build)
self.cmdclass.setdefault("install", install)
self.cmdclass.setdefault("po_stats", po_stats)
self.cmdclass.setdefault("update_po", update_po)
self.cmdclass.setdefault("create_po", create_po)
self.cmdclass.setdefault("coverage", coverage_cmd)
self.cmdclass.setdefault("build_sphinx", build_sphinx)
self.cmdclass.setdefault("quality", quality_cmd)
self.cmdclass.setdefault("distcheck", distcheck_cmd)
self.cmdclass.setdefault("test", test_cmd)
self.cmdclass.setdefault("quality", quality_cmd)
self.cmdclass.setdefault("clean", clean)
def has_po(self):
return bool(self.po_directory)
def has_shortcuts(self):
return not is_osx and bool(self.shortcuts)
def has_appdata(self):
return not is_osx and bool(self.appdata)
def has_man_pages(self):
return bool(self.man_pages)
def has_dbus_services(self):
return not is_osx and bool(self.dbus_services)
def has_zsh_completions(self):
return bool(self.zsh_completions)
def need_icon_install(self):
return not is_osx
def need_search_provider(self):
return not is_osx
__all__ = ["GDistribution", "setup"]
|
AlphaLambdaMuPi/CamDrone | camera3.py | Python | mit | 937 | 0.002134 | #! /u | sr/bin/env python3
import asyncio
import subprocess
import numpy as np
import time
c | omm = None
class Camera:
def __init__(self, notify):
self._process = None
self._now_pos = np.array([0., 0., 0.])
self._running = False
self._notify = notify
@asyncio.coroutine
def connect(self):
self._process = yield from asyncio.create_subprocess_exec(
'python2', 'camera.py',
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE
)
self._running = True
@asyncio.coroutine
def run(self):
while self._running:
data = yield from self._process.stdout.readline()
print(data)
self._now_pos = np.array(list(map(float, data.split())))
yield from self._notify(time.time(), self._now_pos)
def stop(self):
self._running = False
self._process.terminate()
|
itarozzi/classerman | src/ui/mainwindow_ui.py | Python | gpl-3.0 | 16,000 | 0.003313 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/ui/mainwindow.ui'
#
# Created: Fri Feb 15 16:08:54 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1024, 768)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setMaximumSize(QtCore.QSize(200, 200))
self.label_3.setText(_fromUtf8(""))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/logo.jpg")))
self.label_3.setScaledContents(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout | _2"))
self.label_2 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(20)
self.label_2.setFont(font)
| self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout_2.addWidget(self.label_2)
self.labelServerId = QtGui.QLabel(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.labelServerId.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.labelServerId.setFont(font)
self.labelServerId.setAlignment(QtCore.Qt.AlignCenter)
self.labelServerId.setObjectName(_fromUtf8("labelServerId"))
self.verticalLayout_2.addWidget(self.labelServerId)
self.labelYear = QtGui.QLabel(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.labelYear.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(37)
font.setBold(True)
font.setWeight(75)
self.labelYear.setFont(font)
self.labelYear.setTextFormat(QtCore.Qt.PlainText)
self.labelYear.setAlignment(QtCore.Qt.AlignCenter)
self.labelYear.setObjectName(_fromUtf8("labelYear"))
self.verticalLayout_2.addWidget(self.labelYear)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setMaximumSize(QtCore.QSize(200, 200))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/Stampa-silicone-tondo-fi55.png")))
self.label.setScaledContents(True)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setFrameShadow(QtGui.QFrame.Raised)
self.line.setLineWidth(4)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout.addWidget(self.line)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnNewYear = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnNewYear.sizePolicy().hasHeightForWidth())
self.btnNewYear.setSizePolicy(sizePolicy)
self.btnNewYear.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnNewYear.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/planner.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnNewYear.setIcon(icon)
self.btnNewYear.setIconSize(QtCore.QSize(128, 128))
self.btnNewYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnNewYear.setAutoRaise(False)
self.btnNewYear.setArrowType(QtCore.Qt.NoArrow)
self.btnNewYear.setObjectName(_fromUtf8("btnNewYear"))
self.horizontalLayout.addWidget(self.btnNewYear)
self.btnCloseYear = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnCloseYear.sizePolicy().hasHeightForWidth())
self.btnCloseYear.setSizePolicy(sizePolicy)
self.btnCloseYear.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnCloseYear.setFont(font)
self.btnCloseYear.setAutoFillBackground(False)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnCloseYear.setIcon(icon1)
self.btnCloseYear.setIconSize(QtCore.QSize(128, 128))
self.btnCloseYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnCloseYear.setObjectName(_fromUtf8("btnCloseYear"))
self.horizontalLayout.addWidget(self.btnCloseYear)
self.btnTeachers = QtGui.QToo |
huangciyin/youtube-dl | youtube_dl/downloader/f4m.py | Python | unlicense | 10,503 | 0.001238 | from __future__ import unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .common import FileDownloader
from .http import HttpFD
from ..utils import (
struct_pack,
struct_unpack,
compat_urlparse,
format_bytes,
encodeFilename,
sanitize_open,
)
class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        # Big-endian unsigned 64-bit integer.
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        # Big-endian unsigned 32-bit integer.
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        # Single unsigned byte.
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        # Null-terminated byte string; the terminator is consumed but not
        # included in the returned value.
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            # A 32-bit size of 1 signals an extended 64-bit size field.
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size-header_end)

    def read_asrt(self):
        # Parse a Segment Run Table ('asrt') box body.
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()
        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))
        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # Parse a Fragment Run Table ('afrt') box body.
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()
        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()
        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                # A zero duration marks a discontinuity entry, which carries
                # an extra indicator byte.
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })
        return {
            'fragments': fragments,
        }

    def read_abst(self):
        # Parse a Bootstrap Info ('abst') box body: skip the header fields,
        # then collect the embedded segment and fragment run tables.
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()
        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()
        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())
        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        # Read the single top-level box (must be 'abst') and decode it.
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
    """Decode a raw bootstrap-info blob into its segment/fragment tables."""
    reader = FlvReader(bootstrap_bytes)
    return reader.read_bootstrap_info()
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    # I've only found videos with one segment, so every fragment is
    # attributed to segment 1.
    segment_run_entry = boot_info['segments'][0]['segment_run'][0]
    total_fragments = segment_run_entry[1]
    first_fragment = boot_info['fragments'][0]['fragments'][0]['first']
    return [(1, first_fragment + offset) for offset in range(total_fragments)]
def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream"""
    out = bytearray()
    # FLV signature, version 1, audio+video flags, 9-byte header size.
    out += b'FLV\x01\x05\x00\x00\x00\x09'
    # PreviousTagSize0 is always zero.
    out += b'\x00\x00\x00\x00'
    # Script-data FLVTAG (type 0x12) followed by the 24-bit payload size.
    out += b'\x12'
    out += struct_pack('!L', len(metadata))[1:]
    # Timestamp (3 bytes), timestamp-extended (1 byte), stream id (3 bytes).
    out += b'\x00\x00\x00\x00\x00\x00\x00'
    out += metadata
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
    out += b'\x00\x00\x01\x73'
    stream.write(bytes(out))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class HttpQuietDownloader(HttpFD):
    # Silences all console output of the HTTP downloader so the per-fragment
    # progress does not clutter the overall f4m download report.
    def to_screen(self, *args, **kargs):
        pass
class F4mFD(FileDownloader):
"""
A downloader for f4m manifests or AdobeHDS.
"""
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(self.ydl,
{
'continuedl': True,
'quiet': True,
'nopro | gress': True,
'test': self.params.get('test', False),
})
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
formats | = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
write_flv_header(dest_stream, metadata)
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'downloaded_bytes': 0,
'frag_counter': 0,
}
start = time.time()
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = ( |
rohitranjan1991/home-assistant | tests/components/tplink/test_diagnostics.py | Python | mit | 1,931 | 0.001554 | """Tests for the diagnostics data provided by the TP-Link integration."""
import json
from aiohttp import ClientSession
from kasa import SmartDevice
import pytest
from homeassistant.core import HomeAssistant
from . import _mocked_bulb, _mocked_plug, initializ | e_config_entry_for_device
from tests.common import load_fixture
from tests.components.diagnostics import get_diagnostics_for_config_entry
@pytest.mark.parametrize(
    "mocked_dev,fixture_file,sysinfo_vars",
    [
        (
            _mocked_bulb(),
            "tplink-diagnostics-data-bulb-kl130.json",
            ["mic_mac", "deviceId", "oemId", "hwId", "alias"],
        ),
        (
            _mocked_plug(),
            "tplink-diagnostics-data-plug-hs110.json",
            ["mac", "deviceId", "oemId", "hwId", "alias", "longitude_i", "latitude_i"],
        ),
    ],
)
async def test_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSession,
    mocked_dev: SmartDevice,
    fixture_file: str,
    sysinfo_vars: list[str],
):
    """Test diagnostics for config entry."""
    # Seed the mocked device with a canned protocol response so the
    # diagnostics platform has real-looking data to redact.
    diagnostics_data = json.loads(load_fixture(fixture_file, "tplink"))
    mocked_dev._last_update = diagnostics_data["device_last_response"]
    config_entry = await initialize_config_entry_for_device(hass, mocked_dev)
    result = await get_diagnostics_for_config_entry(hass, hass_client, config_entry)
    assert isinstance(result, dict)
    assert "device_last_response" in result
    # There must be some redactions in place, so the raw data must not match
    assert result["device_last_response"] != diagnostics_data["device_last_response"]
    last_response = result["device_last_response"]
    # We should always have sysinfo available
    assert "system" in last_response
    assert "get_sysinfo" in last_response["system"]
    sysinfo = last_response["system"]["get_sysinfo"]
    for var in sysinfo_vars:
        # Every device-identifying field must be masked by the redaction layer.
        assert sysinfo[var] == "**REDACTED**"
|
RedXBeard/gitwatcher-ui | shortcuts.py | Python | mit | 4,040 | 0.000248 | import os
import re
from subprocess import Popen, PIPE
from kivy.uix.popup import Popup
def run_syscall(cmd):
    """
    run_syscall; handle sys calls this function used as shortcut.
    Runs *cmd* through the shell and returns its stdout with trailing
    whitespace stripped.
    ::cmd: String, shell command is expected.
    """
    process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    stdout_data = process.communicate()[0]
    return stdout_data.rstrip()
def striptags(text):
    """
    striptags; markuped text should be cleared to use
    most of times this function is used as shortcuts.
    ::text: String; markuped text is expected
    """
    markup_pattern = re.compile(r'\[[^>]*?\]')
    return markup_pattern.sub('', text)
def create_popup(title, content):
    """
    create_popup; couple of actions' result displayed as popup,
    this function used as shortcut.
    ::title: String.
    ::content: Label, kivy Label class expected.
    """
    # Fixed 300x100 popup; the caller is responsible for open()/dismiss().
    popup = Popup(title=title, content=content,
                  size_hint=(None, None), size=(300, 100))
    return popup
def diff_formatter(text):
    """
    diff_formatter; diff text formats with this function lines starts with '+'
    line colored with green if starts with '-' then line should be
    colored with red others should keep with black. diff datas such as
    commiter, commit date, commit message, commit log id short one are
    collecting and result returned.
    ::text: String
    Returns a (diff, message, commit, author, date) tuple; the metadata
    fields are empty strings when the header block is missing.
    """
    def replacer(text, search, color):
        # NOTE: the immediate return deliberately disables the colorizing
        # below ("convertion should wait for a while") — the function is
        # currently a no-op and the diff is passed through unchanged.
        return text
        # convertion should wait for a while.
        result_text = ""
        location = 0
        while location != -1:
            tmp_location = text.find(search)
            if tmp_location != -1:
                result_text += text[:tmp_location]
                line_end = text[tmp_location + 2:].find("\n")
                if line_end > 0:
                    result_text += "\n[color=%s]%s[/color]" % \
                        (color,
                         text[tmp_location + 1:tmp_location + 2 + line_end])
                else:
                    result_text += "\n[color=%s]%s[/color]" % \
                        (color, text[tmp_location + 1:])
                    text = ""
                location = tmp_location + 2 + line_end
                text = text[location:]
            else:
                result_text += text
                location = -1
        return result_text

    green = "\n+"
    red = "\n-"
    tmp_text = text
    # Both calls currently return their input untouched (see replacer above).
    result_text = replacer(replacer(tmp_text, green, "00ff00"), red, "ff0000")
    # ``merge`` is initialized but never populated below.
    commit, merge, author, date = "", "", "", ""
    # Everything before the first "<<\n" marker is the commit header block.
    data = '<<\n'.join(result_text.split("<<\n")[:1]).strip()
    if data.startswith('sha'):
        # Header format (produced by the caller's git log formatting):
        #   sha:(<id>) author:(<name>) date:(<date>) message:(...) >> <msg>
        diff = '<<\n'.join(result_text.split("<<\n")[1:]).strip()
        message = data.split('>>')[1].strip()
        commit = data.split('author:')[0].split(
            'sha:(')[1].replace(')', '').strip()
        author = data.split('date:')[0].split(
            'author:(')[1].replace(')', '').strip()
        date = data.split('message:')[0].split(
            'date:(')[1].replace(')', '').strip()
    else:
        # No header present: treat the whole input as the diff body.
        diff = data
        message, commit, author, date = "", "", "", ""
    return diff, message, commit, author, date
def findparent(curclass, targetclass):
    """
    findparent; each classes has a parent, in an action
    parent classes methods in generally are used to
    reach needed class this function is used as shortcut.
    until target class and current class names are equal
    recursion continues.
    ::curclass: class, current class
    ::targetclass: class, target class (instance, class object or name string)
    Returns the matching ancestor widget, or None when the walk reaches a
    'core' parent without finding the target.
    """
    # NOTE(review): ``unicode`` is the Python 2 builtin — this module is
    # Python 2 only as written; porting to Python 3 would need ``str`` alone.
    reqclass = curclass
    if type(targetclass) in [unicode, str]:
        targetclass_name = targetclass
    else:
        # Derive the class name from repr output like "<class 'pkg.Name'>".
        targetclass_name = str(targetclass().__class__).\
            split('.')[1].replace("'>", "")
    while True:
        cls = str(reqclass.__class__).split('.')[1].replace("'>", "")
        if cls == targetclass_name:
            break
        elif cls == 'core':
            # Hit a 'core' parent without a match — presumably the kivy
            # widget-tree root; give up and return None.
            reqclass = None
            break
        reqclass = reqclass.parent
    return reqclass
|
kubevirt/client-python | test/test_v1_virtual_machine_instance_replica_set_status.py | Python | apache-2.0 | 1,109 | 0.002705 | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absol | ute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_virtual_machine_instance_replica_set_status import V1VirtualMachineInstanceReplicaSetStatus
class TestV1VirtualMachineInstanceReplicaSetStatus(unittest.TestCase):
    """ V1VirtualMachineInstanceReplicaSetStatus unit test stubs """
    # Swagger-codegen generated placeholder; the FIXME below marks where a
    # real construction test should go.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1VirtualMachineInstanceReplicaSetStatus(self):
        """
        Test V1VirtualMachineInstanceReplicaSetStatus
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubevirt.models.v1_virtual_machine_instance_replica_set_status.V1VirtualMachineInstanceReplicaSetStatus()
        pass
if __name__ == '__main__':
unittest.main()
|
kerel-fs/skylines | skylines/lib/geo.py | Python | agpl-3.0 | 737 | 0 | # -*- coding: utf-8 -*-
import math
EARTH_RADIUS = 6367009
METERS_PER_DEGREE = 111319.0
FEET_PER_METER = 3.2808399


def geographic_distance(loc1, loc2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    # convert decimal degrees to radians
    lat1 = math.radians(loc1.latitude)
    lon1 = math.radians(loc1.longitude)
    lat2 = math.radians(loc2.latitude)
    lon2 = math.radians(loc2.longitude)

    # haversine formula
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    a = math.sin(half_dlat) ** 2 + \
        math.cos(lat1) * math.cos(lat2) * math.sin(half_dlon) ** 2
    central_angle = 2 * math.asin(math.sqrt(a))
    return EARTH_RADIUS * central_angle
|
graphite-project/ceres | ceres.py | Python | apache-2.0 | 31,634 | 0.010084 | # Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
# Py2/Py3 compatibility: itertools.izip was removed in Python 3, where the
# built-in zip is already lazy.
izip = getattr(itertools, 'izip', zip)

try:
    import fcntl
    CAN_LOCK = True
except ImportError:
    # fcntl is POSIX-only; without it slice write locking is unavailable.
    CAN_LOCK = False

LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"  # big-endian unsigned 32-bit
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"  # big-endian IEEE-754 double
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
    """Represents a tree of Ceres metrics contained within a single path on disk
    This is the primary Ceres API.

    :param root: The directory root of the Ceres tree

    .. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree

    .. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
    """
    def __init__(self, root):
        if isdir(root):
            self.root = abspath(root)
        else:
            raise ValueError("Invalid root directory '%s'" % root)
        # Maps metric name -> CeresNode when caching is enabled (see getNode).
        self.nodeCache = {}
        self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR

    def __repr__(self):
        return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
    __str__ = __repr__

    @classmethod
    def createTree(cls, root, **props):
        """Create and returns a new Ceres tree with the given properties
        :param root: The root directory of the new Ceres tree
        :param \*\*props: Arbitrary key-value properties to store as tree metadata

        :returns: :class:`CeresTree`
        """
        # Tree metadata lives in a hidden .ceres-tree directory, one file
        # per property.
        ceresDir = join(root, '.ceres-tree')
        if not isdir(ceresDir):
            os.makedirs(ceresDir, DIR_PERMS)

        for prop, value in props.items():
            propFile = join(ceresDir, prop)
            with open(propFile, 'w') as fh:
                fh.write(str(value))

        return cls(root)

    def walk(self, **kwargs):
        """Iterate through the nodes contained in this :class:`CeresTree`

        :param \*\*kwargs: Options to pass to :func:`os.walk`

        :returns: An iterator yielding :class:`CeresNode` objects
        """
        for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
            if CeresNode.isNodeDir(fsPath):
                nodePath = self.getNodePath(fsPath)
                yield CeresNode(self, nodePath, fsPath)

    def getFilesystemPath(self, nodePath):
        """Get the on-disk path of a Ceres node given a metric name

        :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``

        :returns: The Ceres node path on disk"""
        # Dots in metric names map directly to directory separators.
        return join(self.root, nodePath.replace('.', os.sep))

    def getNodePath(self, fsPath):
        """Get the metric name of a Ceres node given the on-disk path

        :param fsPath: The filesystem path of a Ceres node

        :returns: A metric name

        :raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
        """
        fsPath = abspath(fsPath)
        if not fsPath.startswith(self.root):
            raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))

        nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
        return nodePath

    def hasNode(self, nodePath):
        """Returns whether the Ceres tree contains the given metric

        :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``

        :returns: `True` or `False`"""
        return isdir(self.getFilesystemPath(nodePath))

    def setNodeCachingBehavior(self, behavior):
        """Set node caching behavior.

        :param behavior: See :func:`getNode` for valid behavior values
        """
        behavior = behavior.lower()
        if behavior not in ('none', 'all'):
            raise ValueError("invalid caching behavior '%s'" % behavior)

        self.nodeCachingBehavior = behavior
        # Changing the policy invalidates anything previously cached.
        self.nodeCache = {}

    def getNode(self, nodePath):
        """Returns a Ceres node given a metric name. Because nodes are looked up in
        every read and write, a caching mechanism is provided. Cache behavior is set
        using :func:`setNodeCachingBehavior` and defaults to the value set in
        ``DEFAULT_NODE_CACHING_BEHAVIOR``

        The following behaviors are available:

        * `none` - Node is read from the filesystem at every access.
        * `all` (default) - All nodes are cached.

        :param nodePath: A metric name

        :returns: :class:`CeresNode` or `None`
        """
        if self.nodeCachingBehavior == 'all':
            if nodePath not in self.nodeCache:
                fsPath = self.getFilesystemPath(nodePath)
                if CeresNode.isNodeDir(fsPath):
                    self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
                else:
                    # Missing nodes are NOT negatively cached; each miss
                    # re-checks the filesystem.
                    return None
            return self.nodeCache[nodePath]

        elif self.nodeCachingBehavior == 'none':
            fsPath = self.getFilesystemPath(nodePath)
            if CeresNode.isNodeDir(fsPath):
                return CeresNode(self, nodePath, fsPath)
            else:
                return None

        else:
            raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)

    def find(self, nodePattern, fromTime=None, untilTime=None):
        """Find nodes which match a wildcard pattern, optionally filtering on
        a time range

        :param nodePattern: A glob-style metric wildcard
        :param fromTime: Optional interval start time in unix-epoch.
        :param untilTime: Optional interval end time in unix-epoch.

        :returns: An iterator yielding :class:`CeresNode` objects
        """
        for fsPath in glob(self.getFilesystemPath(nodePattern)):
            if CeresNode.isNodeDir(fsPath):
                nodePath = self.getNodePath(fsPath)
                node = self.getNode(nodePath)

                if fromTime is None and untilTime is None:
                    yield node
                elif node.hasDataForInterval(fromTime, untilTime):
                    yield node

    def createNode(self, nodePath, **properties):
        """Creates a new metric given a new metric name and optional per-node metadata
        :param nodePath: The new metric name.
        :param \*\*properties: Arbitrary key-value properties to store as metric metadata.

        :returns: :class:`CeresNode`
        """
        return CeresNode.create(self, nodePath, **properties)

    def store(self, nodePath, datapoints):
        """Store a list of datapoints associated with a metric
        :param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
        :param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
        """
        node = self.getNode(nodePath)

        if node is None:
            raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)

        node.write(datapoints)

    def fetch(self, nodePath, fromTime, untilTime):
        """Fetch data within a given interval from the given metric

        :param nodePath: The metric name to fetch from
        :param fromTime: Requested interval start time in unix-epoch.
        :param untilTime: Requested interval end time in unix-epoch.

        :returns: :class:`TimeSeriesData`

        :raises: :class:`NodeNotFound`, :class:`InvalidRequest`
        """
        node = self.getNode(nodePath)

        if not node:
            raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)

        return node.read(fromTime, untilTime)
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. Th |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugins/telefe.py | Python | gpl-2.0 | 1,939 | 0.004126 | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream, HTTPStream
from streamlink.utils import parse_json
class Telefe(Plugin):
    # Streamlink plugin for telefe.com video pages.
    _url_re = re.compile(r'https?://telefe.com/.+')

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url)

    def _get_streams(self):
        res = self.session.http.get(self.url, headers={'User-Agent': useragents.CHROME})
        video_search = res.text
        # Carve the embedded player JSON out of the page: it starts at the
        # PlayerContainer model and ends right after the sources array, so a
        # closing brace is appended to make the fragment valid JSON.
        video_search = video_search[video_search.index('{"top":{"view":"PlayerContainer","model":{'):]
        video_search = video_search[: video_search.index('}]}}') + 4] + "}"

        video_url_found_hls = ""
        video_url_found_http = ""

        json_video_search = parse_json(video_search)
        json_video_search_sources = json_video_search["top"]["model"]["videos"][0]["sources"]
        self.logger.debug('Video ID found: {0}', json_video_search["top"]["model"]["id"])
        # Pick the last HLS and HTTP source listed; sources carry relative
        # URLs that are resolved against telefe.com.
        for current_video_source in json_video_search_sources:
            if "HLS" in current_video_source["type"]:
                video_url_found_hls = "http://telefe.com" + current_video_source["url"]
                self.logger.debug("HLS content available")
            if "HTTP" in current_video_source["type"]:
                video_url_found_http = "http://telefe.com" + current_video_source["url"]
                self.logger.debug("HTTP content available")

        # Headers for the follow-up media requests; the Flash-style
        # X-Requested-With value is presumably required by the server —
        # TODO confirm.
        self.session.http.headers = {
            'Referer': self.url, 'User-Agent': useragents.CHROME, 'X-Requested-With': 'ShockwaveFlash/25.0.0.148'
        }

        if video_url_found_hls:
            hls_streams = HLSStream.parse_variant_playlist(self.session, video_url_found_hls)
            for s in hls_streams.items():
                yield s

        if video_url_found_http:
            yield "http", HTTPStream(self.session, video_url_found_http)


__plugin__ = Telefe
|
VladimirFilonov/moscowdjango | moscowdjango/settings_production.py | Python | bsd-3-clause | 780 | 0 | # Django settings for moscowdjango project.
# Start from the base settings and override for production.
# NOTE(review): ``os`` is not imported here, so it presumably arrives via the
# star import from the base settings module — confirm before reordering.
from .settings import *

DEBUG = False

# Secrets come from the environment, never from the repository.
EMBEDLY_KEY = os.environ.get('EMBEDLY_KEY')
SECRET_KEY = os.environ.get('SECRET_KEY')

# Amazon credentials
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'moscowdjango'
AWS_QUERYSTRING_AUTH = False
AWS_CALLING_FORMAT = 2  # SUBDOMAIN
AWS_S3_SECURE_URLS = True

# Media & static
# Media files go to S3; static files are served by whitenoise.
DEFAULT_FILE_STORAGE = 'moscowdjango.amazon.DefaultStorage'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
DEFAULT_S3_PATH = "media"
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = 'https://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME

# Django compressor
COMPRESS_ENABLED = False
|
earthquakesan/pyrdf2nt | pyrdf2nt/converter.py | Python | mit | 1,329 | 0.006772 | from __future__ import print_function
import os
import subprocess
class PyRDF2Ntriples(object):
    """Convert RDF serializations to N-Triples using external CLI tools."""

    def __init__(self):
        pass

    def convertTTL2NT(self, filepath):
        """Convert a Turtle file to N-Triples, saved alongside it as <name>.nt."""
        # Save to the same folder with .nt extension.
        (outputDir, outputFile) = os.path.split(os.path.abspath(filepath))
        # Bug fix: splitext only strips the final extension, so dotted
        # basenames ("a.b.ttl" -> "a.b.nt") are preserved; the previous
        # split(".")[0] truncated them to "a.nt".
        outputFile = os.path.splitext(outputFile)[0] + ".nt"
        outputPath = os.path.join(outputDir, outputFile)
        self.serdiTTL2NT(filepath, outputPath)

    def serdiTTL2NT(self, inpath, outpath):
        """Run serdi to convert *inpath* (Turtle) into *outpath* (N-Triples)."""
        serdiCommand = "serdi -i turtle -o ntriples -b -q %s" % (inpath)
        p = self.execute(serdiCommand)
        # Bug fix: copy the raw stdout bytes and close the file deterministically.
        # The previous print()-per-line approach breaks on Python 3 (stdout
        # yields bytes) and never closed the output handle on error.
        with open(outpath, "wb+") as f:
            f.write(p.stdout.read())

    def executeRapper(self, inpath, outpath, serialization):
        # Placeholder for a rapper-based conversion backend (not implemented).
        pass

    def execute(self, command):
        """Spawn *command* (whitespace-split, no shell) and return the Popen."""
        p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p
if __name__ == "__main__":
    # Smoke test: convert the bundled sample Turtle file to N-Triples.
    currentPath = os.path.dirname(os.path.realpath(__file__))
    testTtlPath = os.path.abspath(os.path.join(currentPath, "../test/data/test.ttl"))
    # instantiate class
    rdf2nt = PyRDF2Ntriples()
    # Test Ttl to nt conversion
    rdf2nt.convertTTL2NT(testTtlPath)
    # Bug fix: removed a leftover "import ipdb; ipdb.set_trace()" debugger
    # breakpoint that halted the script and required a non-listed dependency.
    print("pass")
|
punalpatel/st2 | contrib/examples/actions/pythonactions/yaml_string_to_object.py | Python | apache-2.0 | 168 | 0 | import yaml
from st2common.runners.base_action import Act | ion
class YamlStringToObject(Action):
    # StackStorm action: parse a YAML document string into the corresponding
    # Python object (dict/list/scalar) using the safe loader only.
    def run(self, yaml_str):
        return yaml.safe_load(yaml_str)
|
coddingtonbear/django-neighborhoods | neighborhoods/importer.py | Python | mit | 3,743 | 0.000534 | import logging
import os.path
import shutil
import tempfile
import urllib2
import zipfile
from django.contrib.gis.gdal import DataSource, OGRGeometry, OGRGeomType
from django.db import transaction
from django.db.utils import IntegrityError
logger = logging.getLogger('neighborhoods.importer')
class NeighborhoodShapefileImporterException(Exception):
    """Raised when imported data is unusable (e.g. no shapefile in archive)."""
    pass
class NeighborhoodShapefileImporter(object):
    """Download a zipped shapefile from a URL and import its polygons.

    Subclasses are expected to provide ``get_neighborhood(row, geom)``
    returning an unsaved model instance; ``transform_geometry`` may be
    overridden to reproject geometries before insertion.
    """

    def __init__(self, url):
        self.url = url

    @transaction.commit_on_success
    def process(self):
        """Fetch, extract and import the shapefile, then remove the temp dir."""
        shapefile_dir = self._get_temporary_shapefile_dir_from_url(self.url)
        try:
            self._insert_from_shapefile(shapefile_dir)
        finally:
            # Robustness fix: clean up even when the import raises, and use
            # the (previously unused) cleanup helper for consistency.
            self._cleanup_temporary_directory(shapefile_dir)

    def _cleanup_temporary_directory(self, directory):
        """Delete a temporary extraction directory."""
        shutil.rmtree(directory)

    def _get_multipolygon_geometry_from_row(self, row):
        """Return the row's geometry as a MultiPolygon, or None if unsupported."""
        if row.geom_type.django == 'PolygonField':
            # Promote a single polygon to a one-member multipolygon.
            geom = OGRGeometry(OGRGeomType('MultiPolygon'))
            geom.add(row.geom)
            geom.coord_dim = 2
            return geom
        elif row.geom_type.django == 'MultiPolygonField':
            # Bug fix: this branch previously returned the undefined local
            # name ``geom`` (a guaranteed NameError); the row's own geometry
            # is what was intended.
            return row.geom

    def transform_geometry(self, geom):
        # Hook for subclasses (e.g. to reproject); default is a pass-through.
        return geom

    def _insert_from_shapefile(self, shapefile_dir):
        """Insert every row of the first layer, one savepoint per row."""
        shapefile_path = self._get_shapefile_path_from_directory(shapefile_dir)
        source = DataSource(shapefile_path)
        for row in source[0]:
            geom = self._get_multipolygon_geometry_from_row(row)
            geom = self.transform_geometry(geom)
            if not geom:
                logger.warning(
                    "Unable to convert row %s %s into MultiPolygon" % (
                        row.fid,
                        repr(row)
                    )
                )
                continue
            # ``get_neighborhood`` is supplied by the concrete subclass.
            place = self.get_neighborhood(row, geom)
            # Per-row savepoint so one duplicate does not abort the batch.
            sid = transaction.savepoint()
            try:
                place.save()
                transaction.savepoint_commit(sid)
                logger.info(
                    "(%s) %s, %s, %s Imported Successfully" % (
                        row.fid,
                        place.name,
                        place.city,
                        place.state,
                    )
                )
            except IntegrityError:
                transaction.savepoint_rollback(sid)
                logger.warning(
                    "(%s) %s, %s, %s Already Exists" % (
                        row.fid,
                        place.name,
                        place.city,
                        place.state,
                    )
                )

    def _get_shapefile_path_from_directory(self, directory):
        """Return the path of the first .shp file found in *directory*."""
        shapefile_path = None
        for path in os.listdir(directory):
            basename, extension = os.path.splitext(path)
            if extension == '.shp':
                shapefile_path = os.path.join(
                    directory,
                    path
                )
        if not shapefile_path:
            raise NeighborhoodShapefileImporterException(
                "No shapefile was found in the data extracted!"
            )
        return shapefile_path

    def _get_temporary_shapefile_dir_from_url(self, url):
        """Download the zip at *url* and extract it into a new temp directory."""
        temporary_directory = tempfile.mkdtemp()
        with tempfile.TemporaryFile() as temporary_file:
            zip_file_stream = urllib2.urlopen(url)
            temporary_file.write(
                zip_file_stream.read()
            )
            zip_file_stream.close()
            archive = zipfile.ZipFile(temporary_file, 'r')
            archive.extractall(temporary_directory)
        return temporary_directory
|
kawamon/hue | desktop/core/ext-py/nose-1.3.7/functional_tests/support/issue771/test.py | Python | apache-2.0 | 272 | 0.011029 | from nose.plugins.attrib import attr
from unittest import TestCase
@attr("b")
def test_b():
    # Tagged "b" so it can be selected via nose's -a attribute filter.
    assert 1 == 1


class TestBase(TestCase):
    def test_a(self):
        assert 1 == 1


class TestDerived(TestBase):
    # Overrides the inherited test and tags only the override with "a" —
    # presumably the regression fixture for attribute selection on
    # overridden methods (directory name suggests nose issue 771).
    @attr("a")
    def test_a(self):
        assert 1 == 1
|
corumcorp/redsentir | redsentir/foro/migrations/0003_comentario_fecha.py | Python | gpl-3.0 | 453 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-1 | 0-02 22:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an auto-updating ``fecha`` timestamp
    # (auto_now=True, set on every save) to the Comentario model.

    dependencies = [
        ('foro', '0002_comentario_usuario'),
    ]

    operations = [
        migrations.AddField(
            model_name='comentario',
            name='fecha',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
ScholarTools/pypub | tests/nrg_test.py | Python | mit | 3,751 | 0.000267 | # Standard imports
import os
import sys
import pickle
# Third party imports
import nose
# Local imports
from pypub.scrapers import nature_nrg as nrg
from pypub.paper_info import PaperInfo
class TestNature(object):
    """Compares live nature.com scraper output against a pickled snapshot."""

    def __init__(self):
        self.curpath = str(os.path.dirname(os.path.abspath(__file__)))
        self.link = 'http://www.nature.com/nrg/journal/v15/n5/full/nrg3686.html'
        self.doi = '10.1038/nrg3686'

        # Make a PaperInfo object from the live site information
        try:
            pi = PaperInfo(url=self.link, doi=self.doi, scraper_obj='nature')
            pi.populate_info()
        except Exception:
            # Network/scrape failure: leave markers so the type tests fail
            # visibly rather than erroring during construction.
            self.pi = None
            self.entry_dict = None
        else:
            self.pi = pi
            self.entry_dict = self.pi.entry.__dict__

        # Load saved version of the PaperInfo object
        saved_dir = os.path.join(self.curpath, 'saved_info')
        saved_file_path = os.path.join(saved_dir, 'sp_info.txt')
        self.saved_pi = pickle.load(open(saved_file_path, 'rb'))

        # Make the saved versions into dicts
        self.saved_entry_dict = self.saved_pi.entry.__dict__

    # Testing return types
    def test_entry_type(self):
        assert type(self.pi.entry) is nrg.NatureEntry

    def test_references_type(self):
        assert type(self.pi.references) is list

    def test_reflist_type(self):
        assert type(self.pi.references[0]) is nrg.NatureRef

    # Testing scraped soup against saved site version
    def test_nature_saved_entry(self):
        # Authors are compared separately below because they are objects.
        for x in self.saved_entry_dict.keys():
            if x == 'authors':
                continue
            if self.saved_entry_dict[x] != self.entry_dict[x]:
                print('\nDifference found.')
                print('Key: ' + str(x))
                print('Saved value: ' + str(self.saved_entry_dict[x]))
                print('Live value: ' + str(self.entry_dict[x]))
                assert False

        # Compare authors separately
        live_authors = self.pi.entry.authors
        saved_authors = self.saved_pi.entry.authors

        # First check number of authors
        if len(live_authors) != len(saved_authors):
            assert False

        # Then make sure the values of each author object are equal
        for index, live in enumerate(live_authors):
            saved = saved_authors[index]
            if live.__dict__ != saved.__dict__:
                print('\nDifference found.')
                print('Key: authors')
                print('Saved value: ' + str(saved))
                print('Live value: ' + str(live))
                assert False

        assert True

    def test_nature_saved_refs(self):
        for y in range(len(self.pi.references)):
            saved_ref_dict = self.saved_pi.references[y].__dict__
            live_ref_dict = self.pi.references[y].__dict__
            for x in live_ref_dict.keys():
                # Bug fix: this previously compared the saved value against
                # itself (saved != saved), so a mismatch could never be
                # detected and the test always passed.
                if live_ref_dict[x] != saved_ref_dict[x]:
                    print('\nDifference found.')
                    print('Key: ' + str(x))
                    print('Saved value: ' + str(self.saved_pi.references[y]))
                    print('Live value: ' + str(self.pi.references[y]))
                    print('Specific difference:')
                    print('Saved value: ' + str(saved_ref_dict[x]))
                    print('Live value: ' + str(live_ref_dict[x]))
                    assert False
        assert True

    def test_nature_pdf_link(self):
        assert self.pi.pdf_link == self.saved_pi.pdf_link
if __name__ == '__main__':
    # Run only this module's tests through nose with verbose output.
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0], module_name, '-v'])
|
jameslkey/CERM-Morse-Code-Exhibit | tests/test_config.py | Python | lgpl-3.0 | 2,346 | 0.007673 | # coding=utf-8
"""
CERMMorse : test_config
5/7/2017 : 11:32 PM
Author : James L. Key
"""
from unittest import TestCase
from readconfig import Config
__author__ = 'James L. Key'
__project__ = 'CERMMorse'
class TestConfig(TestCase):
    def setUp(self):
        # Parse the sample configuration shipped with the repository.
        self.conf = Config(configpath='../data/config.json')
        self.conf.getconfig()

    def evalcolor(self):
        # Helper (not a test): validate that each parsed RGB component is
        # 0 or 1. Bitwise ``|`` is safe here because ``not in`` yields bools.
        color = self.conf.Color
        r = color[0]
        g = color[1]
        b = color[2]
        if (r not in range(0, 2)) | (g not in range(0, 2)) | (b not in range(0, 2)):
            return False
        else:
            return True

    def test_getconfig(self):
        # Pin types and ranges of every parsed setting: I2C addresses for the
        # LCD pins, GPIO numbers for relay/motion, and WPM sanity bounds.
        self.assertIsInstance(self.conf.LCDPin1, int, 'Config LCDPin1 is not an Integer!!')
        self.assertIn(self.conf.LCDPin1, range(0, 4), 'Config LCDPin1 is not in I2C Range!!')
        self.assertIsInstance(self.conf.LCDPin2, int, 'Config LCDPin2 is not an Integer!!')
        self.assertIn(self.conf.LCDPin2, range(0, 4), 'Config LCDPin1 is not in I2C Range!!')
        self.assertIsInstance(self.conf.RelayPin, int, 'Config RelayPin is not an Integer!!')
        self.assertIn(self.conf.RelayPin, range(0, 27), 'Config LCDPin1 is not in GPIO Range!!')
        self.assertIsInstance(self.conf.MotionDetPin, int, 'Config MotionDetPin is not an Integer!!')
        self.assertIn(self.conf.MotionDetPin, range(0, 27), 'Config LCDPin1 is not in GPIO Range!!')
        self.assertIsInstance(self.conf.WPM, int, 'Config WPM is not an Integer!!')
        self.assertGreaterEqual(self.conf.WPM, 1, 'Config WPM is not Greater than 1!!')
        self.assertIsInstance(self.conf.MaxWPM, int, 'Config MaxWPM is not an Integer!!')
        self.assertGreaterEqual(self.conf.MaxWPM, self.conf.WPM, 'Config MaxWPM is not Greater or Equal to WPM!!')
        self.assertLess(self.conf.MaxWPM, 31, 'Config MaxWPM is Greater than 30WPM -- Seriously? !!')
        self.assertIsInstance(self.conf.SpeedAdjust, bool, 'Config SpeedAdjust is not Boolean!!')
        self.assertIsInstance(self.conf._Colorstr, str, 'Config Stored Color String is not a String!!')
        self.assertTrue(self.evalcolor(),
                        'Parsed Color is not valid - value of number is not (0 or 1) and in form (#, #, #)')
        self.assertIsInstance(self.conf.ParagraphSep, str, 'Config ParagraphSep is not a String!!')
|
# Valid background-image display modes.
bg_image_modes = ('stretch', 'tile', 'center', 'right', 'left')

# Transition effect names supported by jQuery UI.
# (Garbled entry "' | explode'" repaired to 'explode'.)
transitions_jquery_ui = (
    'blind', 'bounce', 'clip', 'drop', 'explode', 'fade', 'fold',
    'highlight', 'puff', 'pulsate', 'scale', 'shake', 'size', 'slide'
)

# Entrance animation names supported by animate.css.
# (Stray separator before 'flipInX' repaired — it was a syntax error.)
transitions_animatecss = (
    'bounceIn',
    'bounceInDown',
    'bounceInLeft',
    'bounceInRight',
    'bounceInUp',
    'fadeIn',
    'fadeInDown',
    'fadeInDownBig',
    'fadeInLeft',
    'fadeInLeftBig',
    'fadeInRight',
    'fadeInRightBig',
    'fadeInUp',
    'fadeInUpBig',
    'flipInX',
    'flipInY',
    'lightSpeedIn',
    'rotateIn',
    'rotateInDownLeft',
    'rotateInDownRight',
    'rotateInUpLeft',
    'rotateInUpRight',
    'rollIn',
    'zoomIn',
    'zoomInDown',
    'zoomInLeft',
    'zoomInRight',
    'zoomInUp',
    'slideInDown',
    'slideInLeft',
    'slideInRight',
    'slideInUp',
)
|
saelo/willie-modules | asm.py | Python | mit | 4,715 | 0.009334 | # coding=utf8
"""
asm.py - (dis)assembly features.
(c) 2014 Samuel Groß
"""
from willie import web
from willie.module import commands, nickname_commands, example
from random import choice
from binascii import hexlify, unhexlify
import string
import re
import os
from subprocess import Popen, PIPE
@commands('disas', 'disas64', 'disassemble', 'disassemble64')
@example('.disas 66556689e590c9c3')
def disassemble(bot, trigger):
    """Disassemble x86 machine code given as a hex string.

    Writes the bytes to a temp file and shells out to `ndisasm`
    (32- or 64-bit depending on the command used), then says each
    output line back to the channel.
    """
    if not trigger.group(2):
        return bot.reply('Nothing to disassemble')
    try:
        arg = trigger.group(2)
        # remove all 0x prefixes (replace already removes every occurrence,
        # so the loop runs at most once; kept for clarity)
        while "0x" in arg:
            arg = arg.replace("0x","")
        # remove everything except hex digits
        arg = re.sub(r"[^a-fA-F0-9]", r"", arg)
        code = unhexlify(arg)
    except Exception:
        return bot.say('Invalid hex sequence')

    bits = 64 if '64' in trigger.group(1) else 32
    # random temp filename under /tmp for the raw machine code
    filename = '/tmp/' + ''.join( choice(string.ascii_lowercase) for i in range(10)) + '.bin'
    with open(filename, 'wb') as f:
        f.write(code)

    result = Popen(['ndisasm', '-b', str(bits), '-o', '0x1000', filename], stdout=PIPE).stdout.read()
    os.remove(filename)
    for line in result.split('\n'):
        bot.say(line)
@commands('as', 'as64', 'assemble', 'assemble64')
@example('.as push ebp; mov ebp, esp; jmp 0x14')
def assemble(bot, trigger):
    """Assemble x86 instructions."""
    # Instructions arrive as one line, ';'-separated.
    code = trigger.group(2)
    if not code:
        return bot.reply('Nothing to assemble')
    bits = 64 if '64' in trigger.group(1) else 32
    # Write a NASM source file: a BITS directive plus one instruction per line.
    filename = '/tmp/' + ''.join(choice(string.ascii_lowercase) for i in range(10)) + '.asm'
    with open(filename, 'w') as f:
        f.write('BITS %i\n' % bits + re.sub(r';\s*', ';\n', code))
    # Assemble to a flat binary; nasm errors go to stderr and are relayed below.
    p = Popen(['nasm', '-f', 'bin', '-o', filename[:-4], filename], stderr=PIPE)
    p.wait()
    os.remove(filename)
    for line in p.stderr.read().split('\n'):
        bot.say(line)
    if p.returncode == 0:
        # On success, say the produced machine code as a hex string.
        with open(filename[:-4], 'rb') as f:
            raw = f.read()
        hex = hexlify(raw)
        if hex:
            bot.say(hex)
        os.remove(filename[:-4])
def x86jmp(bot, instr):
    """Say the description of an x86 conditional-jump mnemonic."""
    description = jxx.get(instr)
    if description is None:
        return bot.say('I can\'t find anything about that instruction, sorry')
    bot.say('%s : %s' % (instr, description))
def x86instr(bot, instr):
    """Look up a non-jump x86 instruction on felixcloutier.com and say
    its description plus a link to the reference page."""
    # Fetch the instruction index page and scrape the row for this mnemonic.
    raw = web.get('http://www.felixcloutier.com/x86/')
    match = re.search('<tr><td><a href="./(?P<page>[A-Z:]*).html">%s</a></td><td>(?P<desc>[^<]*)</td></tr>' % instr, raw)
    if not match:
        return bot.say('I can\'t find anything about that instruction, sorry')
    bot.say('%s : %s -- %s' % (instr, match.group('desc'), 'http://www.felixcloutier.com/x86/%s' % match.group('page')))
@commands('x86', 'instr', 'instruction')
def instruction(bot, trigger):
    """Look up an x86 instruction and report what it does."""
    raw = trigger.group(2)
    if not raw:
        return bot.reply('Give me an instruction')
    mnemonic = raw.strip().upper()
    # Conditional jumps live in a local table; everything else is scraped.
    if mnemonic[0] == 'J' and mnemonic != 'JMP':
        return x86jmp(bot, mnemonic)
    x86instr(bot, mnemonic)
# Descriptions of x86 conditional-jump mnemonics, keyed by mnemonic.
# Flag abbreviations: CF carry, ZF zero, SF sign, OF overflow, PF parity.
jxx = {
    'JA'   : 'Jump if above (CF=0 and ZF=0)',
    'JAE'  : 'Jump if above or equal (CF=0)',
    'JB'   : 'Jump if below (CF=1)',
    'JBE'  : 'Jump if below or equal (CF=1 or ZF=1)',
    'JC'   : 'Jump if carry (CF=1)',
    'JCXZ' : 'Jump if CX register is 0',
    'JECXZ': 'Jump if ECX register is 0',
    'JRCXZ': 'Jump if RCX register is 0',
    'JE'   : 'Jump if equal (ZF=1)',
    'JG'   : 'Jump if greater (ZF=0 and SF=OF)',
    'JGE'  : 'Jump if greater or equal (SF=OF)',
    'JL'   : 'Jump if less (SF!=OF)',
    'JLE'  : 'Jump if less or equal (ZF=1 or SF!=OF)',
    'JNA'  : 'Jump if not above (CF=1 or ZF=1)',
    'JNAE' : 'Jump if not above or equal (CF=1)',
    'JNB'  : 'Jump if not below (CF=0)',
    'JNBE' : 'Jump if not below or equal (CF=0 and ZF=0)',
    'JNC'  : 'Jump if not carry (CF=0)',
    'JNE'  : 'Jump if not equal (ZF=0)',
    'JNG'  : 'Jump if not greater (ZF=1 or SF!=OF)',
    'JNGE' : 'Jump if not greater or equal (SF!=OF)',
    'JNL'  : 'Jump if not less (SF=OF)',
    'JNLE' : 'Jump if not less or equal (ZF=0 and SF=OF)',
    'JNO'  : 'Jump if not overflow (OF=0)',
    'JNP'  : 'Jump if not parity (PF=0)',
    'JNS'  : 'Jump if not sign (SF=0)',
    'JNZ'  : 'Jump if not zero (ZF=0)',
    'JO'   : 'Jump if overflow (OF=1)',
    'JP'   : 'Jump if parity (PF=1)',
    'JPE'  : 'Jump if parity even (PF=1)',
    'JPO'  : 'Jump if parity odd (PF=0)',
    'JS'   : 'Jump if sign (SF=1)'
}
|
spulec/moto | moto/kms/responses.py | Python | apache-2.0 | 19,277 | 0.002905 | import base64
import json
import os
import re
from moto.core import ACCOUNT_ID
from moto.core.responses import BaseResponse
from moto.kms.utils import RESERVED_ALIASES
from .models import kms_backends
from .exceptions import (
NotFoundException,
ValidationException,
AlreadyExistsException,
NotAuthorizedException,
)
class KmsResponse(BaseResponse):
@property
def parameters(self):
    """Return the request body parsed as JSON, with binary fields decoded.

    KMS transports Plaintext and CiphertextBlob base64-encoded; decode
    them back to raw bytes so backend code can use them directly.
    """
    params = json.loads(self.body)

    for key in ("Plaintext", "CiphertextBlob"):
        if key in params:
            params[key] = base64.b64decode(params[key].encode("utf-8"))

    return params

@property
def kms_backend(self):
    # Backend instance for the region this request targets.
    return kms_backends[self.region]

def _display_arn(self, key_id):
    """Render *key_id* as a full ARN for use in error messages."""
    if key_id.startswith("arn:"):
        return key_id

    if key_id.startswith("alias/"):
        id_type = ""
    else:
        id_type = "key/"

    return "arn:aws:kms:{region}:{account}:{id_type}{key_id}".format(
        region=self.region, account=ACCOUNT_ID, id_type=id_type, key_id=key_id
    )
def _validate_cmk_id(self, key_id):
    """Determine whether a CMK ID exists.

    Accepted forms:
    - raw key ID
    - key ARN

    Raises NotFoundException for malformed or unknown IDs.
    """
    is_arn = key_id.startswith("arn:") and ":key/" in key_id
    # Raw key IDs are UUID-shaped (8-4-4-4-12 hex groups).
    is_raw_key_id = re.match(
        r"^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$",
        key_id,
        re.IGNORECASE,
    )

    if not is_arn and not is_raw_key_id:
        raise NotFoundException("Invalid keyId {key_id}".format(key_id=key_id))

    cmk_id = self.kms_backend.get_key_id(key_id)

    if cmk_id not in self.kms_backend.keys:
        raise NotFoundException(
            "Key '{key_id}' does not exist".format(key_id=self._display_arn(key_id))
        )

def _validate_alias(self, key_id):
    """Determine whether an alias exists.

    Accepted forms:
    - alias name
    - alias ARN

    Raises NotFoundException if the alias is malformed or unknown.
    """
    error = NotFoundException(
        "Alias {key_id} is not found.".format(key_id=self._display_arn(key_id))
    )

    is_arn = key_id.startswith("arn:") and ":alias/" in key_id
    is_name = key_id.startswith("alias/")

    if not is_arn and not is_name:
        raise error

    # The alias must resolve to an existing CMK to be considered valid.
    alias_name = self.kms_backend.get_alias_name(key_id)
    cmk_id = self.kms_backend.get_key_id_from_alias(alias_name)
    if cmk_id is None:
        raise error
def _validate_key_id(self, key_id):
    """Determine whether or not a key ID exists.

    Accepts any of:
    - raw key ID
    - key ARN
    - alias name
    - alias ARN

    Dispatches to the alias or CMK validator accordingly.
    (Garbled separator tokens in this method were repaired.)
    """
    is_alias_arn = key_id.startswith("arn:") and ":alias/" in key_id
    is_alias_name = key_id.startswith("alias/")

    if is_alias_arn or is_alias_name:
        self._validate_alias(key_id)
        return

    self._validate_cmk_id(key_id)
def create_key(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html"""
    # All parameters are optional in the API; missing ones come back as None.
    policy = self.parameters.get("Policy")
    key_usage = self.parameters.get("KeyUsage")
    customer_master_key_spec = self.parameters.get("CustomerMasterKeySpec")
    description = self.parameters.get("Description")
    tags = self.parameters.get("Tags")

    key = self.kms_backend.create_key(
        policy, key_usage, customer_master_key_spec, description, tags, self.region
    )
    return json.dumps(key.to_dict())

def update_key_description(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_UpdateKeyDescription.html"""
    key_id = self.parameters.get("KeyId")
    description = self.parameters.get("Description")

    self._validate_cmk_id(key_id)

    self.kms_backend.update_key_description(key_id, description)
    # The real API returns an empty body on success.
    return json.dumps(None)

def tag_resource(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_TagResource.html"""
    key_id = self.parameters.get("KeyId")
    tags = self.parameters.get("Tags")

    self._validate_cmk_id(key_id)

    result = self.kms_backend.tag_resource(key_id, tags)
    return json.dumps(result)

def untag_resource(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_UntagResource.html"""
    key_id = self.parameters.get("KeyId")
    tag_names = self.parameters.get("TagKeys")

    self._validate_cmk_id(key_id)

    result = self.kms_backend.untag_resource(key_id, tag_names)
    return json.dumps(result)

def list_resource_tags(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_ListResourceTags.html"""
    key_id = self.parameters.get("KeyId")
    self._validate_cmk_id(key_id)

    tags = self.kms_backend.list_resource_tags(key_id)
    # Pagination is not implemented, so the result is never truncated.
    tags.update({"NextMarker": None, "Truncated": False})
    return json.dumps(tags)

def describe_key(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html"""
    key_id = self.parameters.get("KeyId")

    # DescribeKey accepts raw IDs, ARNs, alias names and alias ARNs.
    self._validate_key_id(key_id)

    key = self.kms_backend.describe_key(self.kms_backend.get_key_id(key_id))
    return json.dumps(key.to_dict())

def list_keys(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_ListKeys.html"""
    keys = self.kms_backend.list_keys()

    # Pagination is not implemented, so the result is never truncated.
    return json.dumps(
        {
            "Keys": [{"KeyArn": key.arn, "KeyId": key.id} for key in keys],
            "NextMarker": None,
            "Truncated": False,
        }
    )
def create_alias(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateAlias.html"""
    return self._set_alias()

def update_alias(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_UpdateAlias.html"""
    return self._set_alias(update=True)

def _set_alias(self, update=False):
    """Shared implementation for CreateAlias / UpdateAlias.

    Validates the alias name, then points it at the target CMK.
    When *update* is true, an existing alias with the same name is
    replaced instead of raising AlreadyExistsException.
    """
    alias_name = self.parameters["AliasName"]
    target_key_id = self.parameters["TargetKeyId"]

    # Alias names must carry the "alias/" prefix and must not be
    # one of the AWS-reserved aliases.
    if not alias_name.startswith("alias/"):
        raise ValidationException("Invalid identifier")

    if alias_name in RESERVED_ALIASES:
        raise NotAuthorizedException()

    if ":" in alias_name:
        raise ValidationException(
            "{alias_name} contains invalid characters for an alias".format(
                alias_name=alias_name
            )
        )

    if not re.match(r"^[a-zA-Z0-9:/_-]+$", alias_name):
        raise ValidationException(
            "1 validation error detected: Value '{alias_name}' at 'aliasName' "
            "failed to satisfy constraint: Member must satisfy regular "
            "expression pattern: ^[a-zA-Z0-9:/_-]+$".format(alias_name=alias_name)
        )

    # The target must be a key, never another alias.
    if self.kms_backend.alias_exists(target_key_id):
        raise ValidationException("Aliases must refer to keys. Not aliases")

    if update:
        # delete any existing aliases with that name (should be a no-op if none exist)
        self.kms_backend.delete_alias(alias_name)

    if self.kms_backend.alias_exists(alias_name):
        raise AlreadyExistsException(
            "An alias with the name arn:aws:kms:{region}:{account_id}:{alias_name} "
            "already exists".format(
                region=self.region, account_id=ACCOUNT_ID, alias_name=alias_name
            )
        )

    self._validate_cmk_id(target_key_id)

    self.kms_backend.add_alias(target_key_id, alias_name)

    return json.dumps(None)
def delete_alias(self):
    """https://docs.aws.amazon.com/kms/latest/APIReference/API_DeleteAlias.html"""
    alias_name = self.parameters["AliasName"]

    if not alias_name.startswith("alias/"):
        raise ValidationException("Invalid identifier")

    # Raises NotFoundException if the alias does not exist.
    self._validate_alias(alias_name)
    self.kms_backend.delete_alias(alias_name)

    # The real API returns an empty body on success.
    return json.dumps(None)
def list_aliases(self):
"""https://docs.aws.amazon.com/kms/latest/APIReference/API_ListAliases.html"""
|
schriftgestalt/robofab | Tools/buildRoboFabDistroFromSVN.py | Python | bsd-3-clause | 5,807 | 0.008783 | """
Build the zipped robofab + dependency distros for RoboFab.
Check out fresh copies of the code from SVN.
Compile into zips.
Write optional html page.
"""
import os, glob, time
def getRevision(url):
    """Return the SVN revision number reported by `svn info` for url."""
    pipe = os.popen('svn info "%s"' % url)
    output = pipe.read()
    for line in output.split("\n"):
        # `svn info` reports a line of the form "Revision: 1234".
        if line.startswith("Revision:"):
            return line.split(' ')[-1]
    return "svn: no revision found"
def checkoutPackage(url, stagingFolder, verbose=True):
    """Export a clean (non-working-copy) checkout of url into stagingFolder."""
    cwd = os.getcwd()
    if not os.path.exists(stagingFolder):
        os.makedirs(stagingFolder)
    # NOTE(review): the chdir below is never undone (the restore is commented
    # out), so callers inherit the changed working directory — confirm intended.
    os.chdir(stagingFolder)
    # `svn export` writes the tree without .svn metadata; --force overwrites.
    cmd = "svn export \"%s\" . --force"%(url)
    d = os.popen(cmd)
    if verbose:
        print d.read()
    else:
        d.read()
    d.close()
    #d = os.popen("svnversion")
    #revision = d.read()
    #os.chdir(cwd)
    #return revision.strip()
def buildProducts(products, buildFolder=None, deleteBuilds=False, verbose=True):
    """ Build the different distro products.
        - checkout a clean version from svn
        - add svn revision number to folder
        - zip folder

    products maps a folder-name template (with one %s for the RoboFab
    revision) to a list of (svn url, package name) pairs.
    Returns (list of produced folder basenames, RoboFab revision).
    Shells out to `mv` and `zip`; assumes POSIX tools on PATH.
    """
    versions = {}
    cleanup = []
    filenames = [] # collect filenames of the new zips
    if buildFolder is None:
        buildFolder = os.path.join(os.path.dirname(__file__), "build")
        if verbose:
            print "\tNo build folder specified, using", buildFolder
    for productName, packages in products.items():
        cwd = os.getcwd()
        if verbose:
            print "cwd", cwd
        # Check out every package into a temp-named staging folder first.
        stagingFolder = os.path.join(buildFolder, productName%"temp")
        for url, name in packages:
            checkoutPackage(url, os.path.join(stagingFolder, name), verbose)
            versions[name] = getRevision(url)
        # Rename the staging folder to carry the RoboFab revision number.
        finalFolder = os.path.join(buildFolder, productName%versions.get('RoboFab', "?"))
        filenames.append(os.path.basename(finalFolder))
        d = os.popen("mv \"%s\" \"%s\""%(stagingFolder, finalFolder))
        if verbose:
            print d.read()
        else:
            d.read()
        # Zip the renamed folder in place.
        os.chdir(finalFolder)
        d = os.popen("zip -r \"%s\" *"%finalFolder)
        if verbose:
            print d.read()
        else:
            d.read()
        cleanup.append(finalFolder)
        d.close()
    if deleteBuilds:
        # Remove the unzipped folders, keeping only the .zip archives.
        for path in cleanup:
            if verbose:
                print "cleaning", path
            d = os.popen("rm -r \"%s\""%(path))
            if verbose:
                print d.read()
            else:
                d.read()
    return filenames, versions.get("RoboFab")
# HTML template for the download page.  Filled via the % operator with
# (changeset, changeset, new-zip links, old-zip links, timestamp).
downloadPageTemplate = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<link href="http://robofab.com/default.css" type="text/css" rel="stylesheet" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>RoboFab Downloads</title>
<meta name="generator" content="TextMate http://macromates.com/">
<meta name="author" content="Erik van Blokland">
<!-- Date: 2012-08-18 -->
</head>
<body>
<div id="modellogo">
<img src="http://robofab.com/img/drawmodel_header.jpg" width="595" height="112" />
</div>
<div class="content">
<h1>Download RoboFab</h1>
<p>This page lists the current and (some) older distributions of RoboFab. These distributions contain packages from other developers. License info for these packages is contained in the distribution. This page is automatically generated.</p>
<p><a href="http://code.robofab.com/changeset/%s">Changeset for revision %s on code.robofab.com.</a></p>
<p><a href="http://robofab.com" target="_new">Back to the RoboFab site</a></p>
<h2>Current distribution</h2>
<ul>%s</ul>
<h2>Old distributions</h2>
<ul>%s</ul>
<h2>Generated</h2>
<p>%s</p>
</div>
</body>
</html>
"""
def buildDownloadPage(folder, new=None, changeSet=None):
    """ Build a new download page for the zips available in folder/.

    Zips whose basename starts with an entry of *new* are listed as the
    current distribution; the rest (newest first, max 200) as old ones.
    Writes folder/index.html.
    (Garbled token `n | ame` repaired to `name`.)
    """
    if new is None:
        new = []
    htmlPath = os.path.join(folder, "index.html")
    timeStamp = str(time.asctime(time.localtime(time.time())))
    # collect .zip names
    newZips = []
    oldZips = []
    for n in glob.glob(os.path.join(folder, "*.zip")):
        name = os.path.basename(n)
        isNew = False
        for testName in new:
            if name.find(testName)==0:
                isNew = True
                break
        if isNew:
            newZips.append(name)
        else:
            oldZips.append(name)
    newZips.sort()
    oldZips.sort()
    oldZips.reverse()
    oldZips = oldZips[:200]
    newLinks = "\n\t".join(["<li><a href=\"%s\">%s</a></li>"%(n,n) for n in newZips])
    oldLinks = "\n\t".join(["<li><a href=\"%s\">%s</a></li>"%(n,n) for n in oldZips])
    html = downloadPageTemplate%(changeSet, changeSet, newLinks, oldLinks, timeStamp)
    # Use a context manager so the file is closed even if write() fails.
    with open(htmlPath, 'w') as f:
        f.write(html)
if __name__ == "__main__":
| robofabProducts = {
'RoboFab_%s_plusAllDependencies':[
("http://svn.robofab.com/trunk", "RoboFab"),
("http://svn.typesupply.com/packages/vanilla/trunk", "Vanilla"),
("http://svn.typesupply.com/packages/dialogKit/trunk", "DialogKit"),
("https://fonttools.svn.sourceforge.net/svnroot/fonttools/trunk/", "FontTools")
],
'RoboFab_%s_plusFontTools':[
("http://svn.robofab.com/trunk", "RoboFab"),
("https://fonttools.svn.sourceforge.net/svnroot/fonttools/trunk/", "FontTools")
],
'RoboFab_%s_only':[
("http://svn.robofab.com/trunk", "RoboFab"),
],
}
newProducts, revision = buildProducts(robofabProducts)
|
# Demo: harmonic-plus-stochastic (HPS) analysis/resynthesis of a sax phrase.
# (Garbled tokens `nu | mFrames` and `'sax-phrase-hps | -synthesis.wav'` repaired.)
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))

import utilFunctions as UF
import hpsModel as HPS

# Load the input sound.
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/sax-phrase-short.wav'))
# Analysis parameters: window, FFT size, peak threshold, harmonic count, f0 range.
w = np.blackman(601)
N = 1024
t = -100
nH = 100
minf0 = 350
maxf0 = 700
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4   # hop size; NOTE(review): integer division under Python 2 — confirm if ported to Python 3
stocf = .2

# Harmonic + stochastic analysis, then resynthesis.
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, hphase, mYst, Ns, H, fs)

maxplotfreq = 10000.0
plt.figure(1, figsize=(9, 7))

# Plot 1: input waveform.
plt.subplot(311)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.autoscale(tight=True)
plt.title('x (sax-phrase-short.wav)')

# Plot 2: stochastic spectrogram with harmonic tracks overlaid.
plt.subplot(312)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic')

# Plot 3: resynthesized output.
plt.subplot(313)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.autoscale(tight=True)
plt.title('y')

plt.tight_layout()
plt.savefig('hpsModel-sax-phrase.png')
# Write the full synthesis and its two components to disk.
UF.wavwrite(y, fs, 'sax-phrase-hps-synthesis.wav')
UF.wavwrite(yh, fs, 'sax-phrase-harmonic.wav')
UF.wavwrite(yst, fs, 'sax-phrase-stochastic.wav')
plt.show()
|
kenorb/BitTorrent | auto-update/sign_file.py | Python | gpl-3.0 | 1,204 | 0.001661 | #!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License |
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from common import sig_ext, pri_key_path

import sys
import time
import pickle
from sha import sha
# Garbled import repaired: `randp | ool` -> randpool.
from Crypto.Util import randpool

# Seed an entropy pool from keyboard timing.
r = randpool.KeyboardRandomPool()
r.randomize()

# Load the pickled private signing key.
private_key_file = open(pri_key_path, 'rb')
key = pickle.load(private_key_file)

# For each file named on the command line: hash it, sign the digest,
# verify the signature, then pickle it next to the file with sig_ext.
for f in sys.argv[1:]:
    c = open(f, 'rb').read()
    h = sha(c).digest()
    r.add_event()
    signature = key.sign(h, r.get_bytes(2**4))
    if key.verify(h, signature):
        signature_file = open(f+sig_ext, 'wb')
        pickle.dump(signature, signature_file, protocol=2)
|
atodorov/anaconda | pyanaconda/modules/storage/storage.py | Python | gpl-2.0 | 13,962 | 0.000573 | #
# Kickstart module for the storage.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.signal import Signal
from pyanaconda.core.dbus import DBus
from pyanaconda.modules.common.base import KickstartService
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.containers import TaskContainer
from pyanaconda.modules.common.errors.storage import InvalidStorageError
from pyanaconda.modules.common.structures.requirement import Requirement
from pyanaconda.modules.storage.bootloader import BootloaderModule
from pyanaconda.modules.storage.checker import StorageCheckerModule
from pyanaconda.modules.storage.dasd import DASDModule
from pyanaconda.modules.storage.devicetree import DeviceTreeModule
from pyanaconda.modules.storage.disk_initialization import DiskInitializationModule
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.fcoe import FCOEModule
from pyanaconda.modules.storage.installation import MountFilesystemsTask, ActivateFilesystemsTask, \
WriteConfigurationTask
from pyanaconda.modules.storage.iscsi import ISCSIModule
from pyanaconda.modules.storage.kickstart import StorageKickstartSpecification
from pyanaconda.modules.storage.nvdimm import NVDIMMModule
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
from pyanaconda.modules.storage.partitioning.factory import PartitioningFactory
from pyanaconda.modules.storage.partitioning.validate import StorageValidateTask
from pyanaconda.modules.storage.reset import ScanDevicesTask
from pyanaconda.modules.storage.snapshot import SnapshotModule
from pyanaconda.modules.storage.storage_interface import StorageInterface
from pyanaconda.modules.storage.teardown import UnmountFilesystemsTask, TeardownDiskImagesTask
from pyanaconda.modules.storage.zfcp import ZFCPModule
from pyanaconda.storage.initialization import enable_installer_mode, create_storage
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class StorageService(KickstartService):
"""The Storage service."""
def __init__(self):
    """Create the Storage service: state, child modules and signal wiring."""
    super().__init__()

    # Initialize Blivet.
    enable_installer_mode()

    # The storage model.
    self._current_storage = None
    self._storage_playground = None
    self.storage_changed = Signal()

    # The created partitioning modules.
    self._created_partitioning = []
    self.created_partitioning_changed = Signal()

    # The applied partitioning module.
    self._applied_partitioning = None
    self.applied_partitioning_changed = Signal()
    self.partitioning_reset = Signal()

    # Initialize modules.
    self._modules = []

    self._storage_checker_module = StorageCheckerModule()
    self._add_module(self._storage_checker_module)

    self._device_tree_module = DeviceTreeModule()
    self._add_module(self._device_tree_module)

    self._disk_init_module = DiskInitializationModule()
    self._add_module(self._disk_init_module)

    self._disk_selection_module = DiskSelectionModule()
    self._add_module(self._disk_selection_module)

    self._snapshot_module = SnapshotModule()
    self._add_module(self._snapshot_module)

    self._bootloader_module = BootloaderModule()
    self._add_module(self._bootloader_module)

    self._fcoe_module = FCOEModule()
    self._add_module(self._fcoe_module)

    self._iscsi_module = ISCSIModule()
    self._add_module(self._iscsi_module)

    self._nvdimm_module = NVDIMMModule()
    self._add_module(self._nvdimm_module)

    self._dasd_module = DASDModule()
    self._add_module(self._dasd_module)

    self._zfcp_module = ZFCPModule()
    self._add_module(self._zfcp_module)

    # Connect modules to signals.  Each on_storage_changed handler keeps
    # its module's view of the storage model up to date.
    self.storage_changed.connect(
        self._device_tree_module.on_storage_changed
    )
    self.storage_changed.connect(
        self._disk_init_module.on_storage_changed
    )
    self.storage_changed.connect(
        self._disk_selection_module.on_storage_changed
    )
    self.storage_changed.connect(
        self._snapshot_module.on_storage_changed
    )
    self.storage_changed.connect(
        self._bootloader_module.on_storage_changed
    )
    self.storage_changed.connect(
        self._dasd_module.on_storage_changed
    )
    self._disk_init_module.format_unrecognized_enabled_changed.connect(
        self._dasd_module.on_format_unrecognized_enabled_changed
    )
    self._disk_init_module.format_ldl_enabled_changed.connect(
        self._dasd_module.on_format_ldl_enabled_changed
    )
    self._disk_selection_module.protected_devices_changed.connect(
        self.on_protected_devices_changed
    )
def _add_module(self, storage_module):
    """Add a base kickstart module."""
    self._modules.append(storage_module)

def publish(self):
    """Publish the module and all child modules on DBus."""
    TaskContainer.set_namespace(STORAGE.namespace)

    for kickstart_module in self._modules:
        kickstart_module.publish()

    DBus.publish_object(STORAGE.object_path, StorageInterface(self))
    DBus.register_service(STORAGE.service_name)

@property
def kickstart_specification(self):
    """Return the kickstart specification."""
    return StorageKickstartSpecification
def process_kickstart(self, data):
    """Process the kickstart data."""
    # Process the kickstart data in modules.
    for kickstart_module in self._modules:
        kickstart_module.process_kickstart(data)

    # Set the default filesystem type.
    if data.autopart.autopart and data.autopart.fstype:
        self.storage.set_default_fstype(data.autopart.fstype)

    # Create a new partitioning module matching the kickstart's method
    # (if any) and let it consume the partitioning commands.
    partitioning_method = PartitioningFactory.get_method_for_kickstart(data)
    if partitioning_method:
        partitioning_module = self.create_partitioning(partitioning_method)
        partitioning_module.process_kickstart(data)
def setup_kickstart(self, data):
    """Set up the kickstart data from all modules.

    (Stray separator token before the applied-partitioning check repaired.)
    """
    for kickstart_module in self._modules:
        kickstart_module.setup_kickstart(data)

    # Only the partitioning that was actually applied is written back.
    if self.applied_partitioning:
        self.applied_partitioning.setup_kickstart(data)
@property
def storage(self):
    """The storage model.

    Returns the partitioning playground when one is active, otherwise
    the current (scanned) model, creating it lazily on first access.

    :return: an instance of Blivet
    """
    if self._storage_playground:
        return self._storage_playground

    if not self._current_storage:
        self._set_storage(create_storage())

    return self._current_storage
def _set_storage(self, storage):
"""Set the current storage model.
The current storage is the latest model of
the system’s storage configuration created
by scanning all devices.
:param storage: a storage
"""
self._current_storage = storage
if self._storage_playground:
return
self.storage_changed.emit(storage)
log.debug("The storage |
dezelin/scons | scons-local/SCons/Tool/packaging/msi.py | Python | mit | 20,208 | 0.013856 | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
    """ Some parts of .wxs need an Id attribute (for example: The File and
        Directory directives. The charset is limited to A-Z, a-z, digits,
        underscores, periods. Each Id must begin with a letter or with a
        underscore. Google for "CNDL0015" for information about this.

        Requirements:
         * the string created must only contain chars from the target charset.
         * the string created must have a minimal editing distance from the
           original string.
         * the string created must be unique for the whole .wxs file.

        Observation:
         * There are 62 chars in the charset.

        Idea:
         * filter out forbidden characters. Check for a collision with the help
           of the id_set. Add the number of the number of the collision at the
           end of the created string. Furthermore care for a correct start of
           the string.
    """
    # NOTE(review): 'Z' is missing from the charset upstream; kept as-is so
    # generated IDs stay stable — confirm against the original SCons source.
    charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxyz0123456789_.'
    if s[0] in '0123456789.':
        s += '_'+s
    # Bug fix: join the filtered characters back into a string.  The old
    # list comprehension left `id` as a list, which is unhashable and made
    # every id_set[id] lookup below raise TypeError.
    id = ''.join(c for c in s if c in charset)

    # did we already generate an id for this file?
    try:
        return id_set[id][s]
    except KeyError:
        # no we did not so initialize with the id
        if id not in id_set: id_set[id] = { s : id }
        # there is a collision, generate an id which is unique by appending
        # the collision number
        else: id_set[id][s] = id + str(len(id_set[id]))

        return id_set[id][s]
def is_dos_short_file_name(file):
    """ examine if the given file is in the 8.3 form.
    """
    fname, ext = os.path.splitext(file)
    proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
    proper_fname = file.isupper() and len(fname) <= 8

    return proper_ext and proper_fname

def gen_dos_short_file_name(file, filename_set):
    """ see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982

        These are no complete 8.3 dos short names. The ~ char is missing and
        replaced with one character from the filename. WiX warns about such
        filenames, since a collision might occur. Google for "CNDL1014" for
        more information.
    """
    # guard this to not confuse the generation
    if is_dos_short_file_name(file):
        return file

    fname, ext = os.path.splitext(file) # ext contains the dot

    # first try if it suffices to convert to upper
    file = file.upper()
    if is_dos_short_file_name(file):
        return file

    # strip forbidden characters.
    forbidden = '."/[]:;=, '
    # Bug fix: rejoin the filtered characters into a str.  The old list
    # comprehension left `fname` as a list, and list slices have no
    # .upper(), which crashed in the loop below.
    fname = ''.join(c for c in fname if c not in forbidden)

    # check if we already generated a filename with the same number:
    # thisis1.txt, thisis2.txt etc.
    duplicate, num = not None, 1
    while duplicate:
        shortname = "%s%s" % (fname[:8-len(str(num))].upper(), str(num))
        if len(ext) >= 2:
            shortname = "%s%s" % (shortname, ext[:4].upper())
        duplicate, num = shortname in filename_set, num+1

    assert is_dos_short_file_name(shortname), 'shortname is %s, longname is %s' % (shortname, file)
    filename_set.append(shortname)

    return shortname
def create_feature_dict(files):
    """Group *files* into a feature hierarchy.

    Files tagged with X_MSI_FEATURE or PACKAGING_DOC are collected under
    those feature names; everything else lands in the 'default' feature.
    Returns a dict mapping feature name -> list of files.
    """
    features = {}

    def register(feature, file):
        # X_MSI_FEATURE may hold one name or a list of names
        if not SCons.Util.is_List(feature):
            feature = [feature]
        for name in feature:
            features.setdefault(name, []).append(file)

    for file in files:
        if hasattr(file, 'PACKAGING_X_MSI_FEATURE'):
            register(file.PACKAGING_X_MSI_FEATURE, file)
        elif hasattr(file, 'PACKAGING_DOC'):
            register('PACKAGING_DOC', file)
        else:
            register('default', file)

    return features
def generate_guids(root):
    """Generate stable identifiers for XML nodes that need them.

    Component tags have a special requirement: their UUID is only allowed
    to change if the list of their contained resources has changed, which
    allows for clean removal and proper updates.  To honour that, the id
    is derived from an md5 hash over the node's whole XML subtree, so it
    only changes when the subtree does.

    :param root: minidom node searched recursively for Product, Package
        and Component elements; matching nodes get the id attribute set
        in place.
    """
    from hashlib import md5

    # specify which tags need a guid and in which attribute this should be stored.
    needs_id = { 'Product'   : 'Id',
                 'Package'   : 'Id',
                 'Component' : 'Guid',
               }

    # find all XML nodes matching the key, retrieve their attribute, hash their
    # subtree, convert hash to string and add as a attribute to the xml node.
    for (key, value) in needs_id.items():
        node_list = root.getElementsByTagName(key)
        attribute = value
        for node in node_list:
            # encode to utf-8: md5 requires bytes on Python 3 (and accepts
            # an encoded str on Python 2 just the same)
            hash = md5(node.toxml().encode('utf-8')).hexdigest()
            hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
            node.attributes[attribute] = hash_str
def string_wxsfile(target, source, env):
    """Return the build message displayed while generating the WiX file."""
    wxs_path = target[0].path
    return "building WiX file %s" % (wxs_path,)
def build_wxsfile(target, source, env):
    """Compile a .wxs file from the keywords given in env['msi_spec'] and
    by analyzing the tree of source nodes and their tags.

    Writes the generated XML document to target[0] and finally hands
    target/source to an optional user hook env['CHANGE_SPECFILE'].

    :raises SCons.Errors.UserError: if a required package field is missing.
    """
    try:
        # `with` guarantees the handle is closed even on error; the previous
        # code never closed the file (and shadowed the `file` builtin).
        with open(target[0].abspath, 'w') as wxs_file:
            # Create a document with the Wix root tag
            doc = Document()
            root = doc.createElement('Wix')
            root.attributes['xmlns'] = 'http://schemas.microsoft.com/wix/2003/01/wi'
            doc.appendChild(root)

            filename_set = []  # this is to circumvent duplicates in the shortnames
            id_set = {}        # this is to circumvent duplicates in the ids

            # Create the content
            build_wxsfile_header_section(root, env)
            build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
            generate_guids(root)
            build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
            build_wxsfile_default_gui(root)
            build_license_file(target[0].get_dir(), env)

            # write the xml to a file
            wxs_file.write(doc.toprettyxml())

            # call a user specified function
            if 'CHANGE_SPECFILE' in env:
                env['CHANGE_SPECFILE'](target, source)
    # `as` syntax works on Python 2.6+ and Python 3 (the old
    # `except KeyError, e` form is Python-2-only).
    except KeyError as e:
        raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix def |
sysadminmatmoz/pmis | change_management/__init__.py | Python | agpl-3.0 | 173 | 0 | # -*- coding: utf-8 -*-
# Copyrigh | t 2017 Matmoz d.o.o. (<http://www.matmoz.si>).
# License AGPL-3.0 or later (https://www. | gnu.org/licenses/agpl.html).
from . import models
|
avehtari/GPy | GPy/plotting/plotly_dep/plot_definitions.py | Python | bsd-3-clause | 15,302 | 0.004901 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from ..abstract_plotting_library import AbstractP | lottingLibrary
from .. import Tango
from . import defaults
from plotly import tools
from plotly i | mport plotly as py
from plotly.graph_objs import Scatter, Scatter3d, Line,\
Marker, ErrorX, ErrorY, Bar, Heatmap, Trace,\
Annotations, Annotation, Contour, Font, Surface
from plotly.exceptions import PlotlyDictKeyError
# Map matplotlib single-character marker codes to the equivalent plotly
# symbol names; used by PlotlyPlots.scatter() to accept matplotlib-style
# marker arguments.
SYMBOL_MAP = {
    'o': 'dot',
    'v': 'triangle-down',
    '^': 'triangle-up',
    '<': 'triangle-left',
    '>': 'triangle-right',
    's': 'square',
    '+': 'cross',
    'x': 'x',
    '*': 'x', # no star yet in plotly!!
    'D': 'diamond',
    'd': 'diamond',
    }
class PlotlyPlots(AbstractPlottingLibrary):
    def __init__(self):
        """Initialise the plotly backend with the module-level defaults."""
        super(PlotlyPlots, self).__init__()
        # expose the plotly_dep.defaults module contents as a plain dict
        self._defaults = defaults.__dict__
        # scratch per-figure state; only referenced by commented-out code
        # below (presumably a filename cache) -- kept for compatibility
        self.current_states = dict()
def figure(self, rows=1, cols=1, specs=None, is_3d=False, **kwargs):
if specs is None:
specs = [[{'is_3d': is_3d}]*cols]*rows
figure = tools.make_subplots(rows, cols, specs=specs, **kwargs)
return figure
    def new_canvas(self, figure=None, row=1, col=1, projection='2d',
                   xlabel=None, ylabel=None, zlabel=None,
                   title=None, xlim=None,
                   ylim=None, zlim=None, **kwargs):
        """Return a (figure, row, col) canvas tuple plus passthrough kwargs.

        Creates a fresh figure when none is given, applies the default font
        and (for 3d) legend placement.  Note: the label/limit parameters
        (xlabel, ylabel, zlabel, title, xlim, ylim, zlim) are accepted for
        interface compatibility but are not used by this backend.
        """
        #if 'filename' not in kwargs:
        #    print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
        #    filename = None
        #else:
        #    filename = kwargs.pop('filename')
        if figure is None:
            figure = self.figure(is_3d=projection=='3d')
        figure.layout.font = Font(family="Raleway, sans-serif")
        if projection == '3d':
            # center the legend and give it a grey background in 3d plots
            figure.layout.legend.x=.5
            figure.layout.legend.bgcolor='#DCDCDC'
        return (figure, row, col), kwargs
    def add_to_canvas(self, canvas, traces, legend=False, **kwargs):
        """Append traces/annotations to the canvas' subplot.

        *traces* may be a single Trace, an Annotations collection, a dict,
        or a (possibly nested) tuple/list of any of these; the structure is
        walked recursively and each leaf is attached to the (row, col)
        subplot recorded in *canvas*.
        """
        figure, row, col = canvas
        def append_annotation(a, xref, yref):
            # anchor the annotation to this subplot's axes unless the
            # caller already set explicit references
            if 'xref' not in a:
                a['xref'] = xref
            if 'yref' not in a:
                a['yref'] = yref
            figure.layout.annotations.append(a)
        def append_trace(t, row, col):
            figure.append_trace(t, row, col)
        def recursive_append(traces):
            if isinstance(traces, Annotations):
                # look up the axis references assigned to this grid cell
                xref, yref = figure._grid_ref[row-1][col-1]
                for a in traces:
                    append_annotation(a, xref, yref)
            elif isinstance(traces, (Trace)):
                try:
                    append_trace(traces, row, col)
                except PlotlyDictKeyError:
                    # Its a dictionary of plots:
                    for t in traces:
                        recursive_append(traces[t])
            elif isinstance(traces, (dict)):
                for t in traces:
                    recursive_append(traces[t])
            elif isinstance(traces, (tuple, list)):
                for t in traces:
                    recursive_append(t)
        recursive_append(traces)
        figure.layout['showlegend'] = legend
        return canvas
    def show_canvas(self, canvas, filename=None, **kwargs):
        """Upload the figure to plotly and display it.

        Uses py.iplot inside an IPython notebook and py.plot otherwise.
        An empty figure gets one invisible mock trace first, since plotly
        rejects figures without data.
        """
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # add mock data
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            return py.iplot(figure, filename=filename)#self.current_states[hex(id(figure))]['filename'])
        else:
            return py.plot(figure, filename=filename)#self.current_states[hex(id(figure))]['filename'])
def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], cmap=None, label=None, marker='o', marker_kwargs=None, **kwargs):
try:
marker = SYMBOL_MAP[marker]
except:
#not matplotlib marker
pass
marker_kwargs = marker_kwargs or {}
if 'symbol' not in marker_kwargs:
marker_kwargs['symbol'] = marker
if Z is not None:
return Scatter3d(x=X, y=Y, z=Z, mode='markers',
showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
return Scatter(x=X, y=Y, mode='markers', showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
def plot(self, ax, X, Y, Z=None, color=None, label=None, line_kwargs=None, **kwargs):
if 'mode' not in kwargs:
kwargs['mode'] = 'lines'
if Z is not None:
return Scatter3d(x=X, y=Y, z=Z, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
return Scatter(x=X, y=Y, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
def plot_axis_lines(self, ax, X, color=Tango.colorsHex['mediumBlue'], label=None, marker_kwargs=None, **kwargs):
if X.shape[1] == 1:
annotations = Annotations()
for i, row in enumerate(X):
annotations.append(
Annotation(
text='',
x=row[0], y=0,
yref='paper',
ax=0, ay=20,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor=color,
showarrow=True,
#showlegend=i==0,
#label=label,
))
return annotations
elif X.shape[1] == 2:
marker_kwargs.setdefault('symbol', 'diamond')
opacity = kwargs.pop('opacity', .8)
return Scatter3d(x=X[:, 0], y=X[:, 1], z=np.zeros(X.shape[0]),
mode='markers',
projection=dict(z=dict(show=True, opacity=opacity)),
marker=Marker(color=color, **marker_kwargs or {}),
opacity=0,
name=label,
showlegend=label is not None, **kwargs)
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=Tang |
namco1992/voicetools | voicetools/api.py | Python | apache-2.0 | 4,729 | 0.001269 | # coding: utf-8
import wolframalpha
from .exceptions import APIError
from .clients import turingclient, baiduclient
from .utils import get_mac_address, get_audio_info
class Wolfram(object):
    """A client for querying Wolfram|Alpha.

    Attributes:
        key: The key string got from https://www.wolframalpha.com.
    """
    def __init__(self, key):
        self.key = key

    def ask_wolfram(self, question):
        """Query Wolfram|Alpha and return the first answer pod's text."""
        client = wolframalpha.Client(self.key)
        res = client.query(question)
        if len(res.pods) > 0:
            texts = res.pods[1].text
            if not texts:
                raise APIError('Wolfram API failed.')
            # to skip ascii character in case of error
            return texts.encode('ascii', 'ignore')
        raise APIError('Wolfram API failed.')
class TuringRobot(object):
    """A client for the Turing Robot chat API.

    Attributes:
        key: The key string got from http://www.tuling123.com.
    """
    def __init__(self, key):
        self.key = key

    def ask_turing(self, question):
        """Send *question* to the Turing API and return the reply text."""
        ret = turingclient.query_turing({'key': self.key, 'info': question})
        code = ret.get('code')
        if code != 100000:
            raise APIError('Cannot handle this ret code: %s' % code)
        return ret['text'].encode('utf-8')
class BaiduVoice(object):
    """A client for the Baidu Voice (speech recognition / synthesis) service.

    (The previous docstring said "Turing Robot" -- a copy/paste slip.)

    Attributes:
        token: The token string got from https://openapi.baidu.com/oauth/2.0/token.
        cuid: Unique identification of user, default is MAC address.
    """
    def __init__(self, token):
        self.token = token
        self.cuid = get_mac_address()

    def asr(self, file_, format_='wav',
            cuid=None, ptc=1, lan='zh'):
        """Constructs and sends an Automatic Speech Recognition request.

        Args:
            file_: the open file with methods write(), close(), tell(), seek()
                set through the __init__() method.
            format_:(optional) the audio format, default is 'wav'
            cuid:(optional) Unique identification of user, default is MAC address.
            ptc:(optional) nbest results, the number of results.
            lan:(optional) language, default is 'zh'.
        Returns:
            A list of recognition results.
        Raises:
            ValueError
            RecognitionError
            VerifyError
            APIError
            QuotaError
        """
        if format_ != 'wav':
            raise ValueError('Unsupported audio format')
        params = {
            'format': format_,
            'token': self.token,
            'cuid': cuid or self.cuid,
            'ptc': ptc,
            'lan': lan
        }
        # Let errors from get_audio_info() propagate unchanged.  The old
        # `except Exception, e: raise e` wrapper was a no-op that only
        # destroyed the traceback (and used Python-2-only syntax).
        audio_info = get_audio_info(file_)
        params['len'], params['rate'] = audio_info['nframes'], audio_info['framerate']
        return baiduclient.asr(audio_info['content'], params)

    def tts(self, tex, lan='zh', ctp=1,
            cuid=None, spd=5, pit=5, vol=5, per=0):
        """Constructs and sends an Text To Speech request.

        Args:
            tex: The text for conversion.
            lan:(optional) language, default is 'zh'.
            ctp:(optional) Client type, default is 1.
            cuid:(optional) Unique identification of user, default is MAC address.
            spd:(optional) speed, range 0-9, default is 5.
            pit:(optional) pitch, range 0-9, default is 5.
            vol:(optional) volume, range 0-9, default is 5.
            per:(optional) voice of male or female, default is 0 for female voice.
        Returns:
            A binary string of MP3 format audio.
        Raises:
            ValueError
            VerifyError
            APIError
        """
        params = {
            'tex': tex,
            'lan': lan,
            'tok': self.token,
            'ctp': ctp,
            'cuid': cuid or self.cuid,
            'spd': spd,
            'pit': pit,
            'vol': vol,
            'per': per
        }
        return baiduclient.tts(params)

    @staticmethod
    def get_baidu_token(api_key, secret_key):
        """Get Baidu Voice Service token by api key and secret.

        Functions of other args of response are not confirmed, so the whole
        response dict will be returned; access the token via
        ret['access_token'].
        """
        params = {
            'grant_type': 'client_credentials',
            'client_id': api_key,
            'client_secret': secret_key
        }
        return baiduclient.get_token(params)
|
chenkianwee/pyliburo | py4design/pyoptimise/nsga2.py | Python | gpl-3.0 | 35,056 | 0.008814 | # ==================================================================================================
#
# Copyright (c) 2016, Chen Kian Wee (chenkianwee@gmail.com)
#
# This file is part of py4design
#
# py4design is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# py4design is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py4design. If not, see <http://www.gnu.org/licenses/>.
#
# ==================================================================================================
import os
import math
import random
import xml.dom.minidom
from xml.dom.minidom import Document
import analyse_xml
#================================================================================
def frange(start, end=None, inc=None):
    """
    A range function, that does accept float increments.

    Parameters
    ----------
    start : float
        The starting number of the sequence.
    end : float, optional
        Generate numbers up to, but not including this number, Default = None. When None, end == start and start = 0.0.
    inc : float, optional
        The difference between each number in the sequence, Default = None. When None, inc = 1.0.

    Returns
    -------
    sequence of floats : list of floats
        A list of float.
    """
    if end is None:
        end = start + 0.0
        start = 0.0
    else:
        start += 0.0  # force it to be a float
    if inc is None:
        inc = 1.0

    count = int(math.ceil((end - start) / inc))
    # BUG FIX: an empty range used to crash with IndexError (L[0] on an
    # empty list); mirror range() and return an empty list instead.
    if count <= 0:
        return []
    L = [None] * count
    L[0] = start
    # range works on Python 2 and 3 (xrange was Python-2-only)
    for i in range(1, count):
        L[i] = L[i-1] + inc
    return L
#================================================================================
class Gene(object):
    """
    An object that contains all the gene information for running a NSGA2 optimisation.

    Parameters
    ----------
    gene_type : str
        The type of the gene. There are four options: "int_range", "int_choice", "float_range", "float_choice".

    value_range : list of int/floats
        List of ints/floats. If the gene_type is "int_range" or "float_range", the list has three elements. The first element is the starting number,
        the second element is the ending number (not included in the sequence), and the last element is the difference between each number in the sequence.
        If the gene_type is "int_choice" or "float_choice", the list is made up of all the possible options.

    Attributes
    ----------
    gene_type : str
        see Parameters.

    value_range : list of int/floats
        see Parameters.

    position : int
        The position of the gene in the genotype. If the position is 0 the gene is in the first postion.
    """
    def __init__(self, gene_type, value_range):
        """Initialises the Gene class"""
        self.gene_type = gene_type
        self.value_range = value_range
        # index within the genotype; filled in later by
        # GenotypeMeta.gene_position()
        self.position = None
class GenotypeMeta(object):
    """
    Meta information of a genotype for running a NSGA2 optimisation:
    holds the ordered list of Gene instances that make up a genotype.

    Attributes
    ----------
    gene_list : list of Gene class instances
        The list of Gene class instances that will be developed in the Genotype class.
    """
    def __init__(self):
        """Initialises the GenotypeMeta class"""
        self.gene_list = []

    def add_gene(self, gene):
        """
        Append *gene* to the gene list.

        Parameters
        ----------
        gene : Gene class instance
            The gene to be added to the Genotype class.
        """
        self.gene_list.append(gene)

    def gene_position(self):
        """
        Assign each gene its index within the gene list (0-based).
        """
        for index, gene in enumerate(self.gene_list):
            gene.position = index

    def length(self):
        """
        Return the number of genes in the gene list.

        Returns
        -------
        length : int
            The number of genes in the gene list.
        """
        return len(self.gene_list)
#================================================================================
class Genotype(object):
"""
An object that contains all the information and methods for developing a genotype for an individual.
Parameters
----------
genotype_meta : GenotypeMeta class instance
The meta information of the genotype.
Attributes
----------
genotype_meta : GenotypeMeta class instance
See Parameters.
values : list of floats/ints
The Genotype values of an individual to develop the individual's phenotype.
"""
    def __init__(self, genotype_meta):
        """Initialises the Genotype class"""
        self.genotype_meta = genotype_meta
        # one value per gene, in gene-position order; populated by
        # randomise() and altered in place by mutate()
        self.values = []
def randomise(self):
"""This function randomly generates the Genotype values"""
genes = self.genotype_meta.gene_list
for gene in genes:
gene_type = gene.gene_type
value_range = gene.value_range
if gene_type == "float_range":
if len(value_range) == 3:
gene_range = frange(value_range[0], value_range[1], value_range[2])#random.uniform( value_range[0], value_range[1])
gene_value = random.choice(gene_range)
self.values.append(gene_value)
if len(value_range) == 2:
gene_value = random.uniform(value_range[0], value_range[1])
self.values.append(gene_value)
if gene_type == "float_choice":
gene_value = random.choice(value_range)
self.values.append(gene_value)
if gene_type == "int_range":
gene_value = random.randrange(value_range[0], value_range[1], value_range[2])
self.values.append(gene_value)
if gene_type == "int_choice":
gene_value = random.choice(value_range)
self.values.append(gene_value)
    def mutate(self, mutation_prob):
        """
        This function mutates the genotype values in place.

        Parameters
        ----------
        mutation_prob : float
            The mutation probability, the probability is between 0 to 1.
        """
        gene_list = self.genotype_meta.gene_list
        for c in range(len(gene_list)):
            # independent dice roll per gene slot
            roll = random.random()
            if roll <= mutation_prob:
                gene_type = None
                value_range = None
                # find the gene occupying position c (linear scan; relies on
                # GenotypeMeta.gene_position() having been called beforehand)
                for gene in gene_list:
                    if gene.position == c:
                        gene_type = gene.gene_type
                        value_range = gene.value_range
                # redraw the value at slot c according to the gene's type,
                # mirroring the drawing rules used in randomise()
                if gene_type == "float_range":
                    if len(value_range) == 3:
                        self.values[c] = random.choice(frange(value_range[0], value_range[1], value_range[2]))
                    if len(value_range) == 2:
                        self.values[c] = random.uniform(value_range[0], value_range[1])
                if gene_type == "float_choice":
                    self.values[c] = random.choice(value_range)
                if gene_type == "int_range":
                    self.values[c] = random.randrange(value_range[0], value_range[1], value_range[2])
                if gene_type == "int_choice":
                    self.values[c] = random.choice(value_range)
def read_str( |
sanguinariojoe/sonsilentsea | scripts/showMouse.py | Python | gpl-3.0 | 38 | 0 | import | Rasterizer | as r
r.showMouse(1)
|
lewisodriscoll/sasview | src/sas/sascalc/data_util/ordereddict.py | Python | bsd-3-clause | 3,441 | 0.002906 | #!/usr/bin/env python
"""Backport from python2.7 to python <= 2.6."""
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
try:
    # available from the stdlib on Python 2.6+
    from itertools import izip_longest as _zip_longest
except ImportError:
    from itertools import izip
    def _zip_longest(*args, **kwds):
        # izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
        fillvalue = kwds.get('fillvalue')
        def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
            # yields fillvalue up to (len(args)-1) times, then raises
            # IndexError -- which terminates the izip below once every
            # input iterable is exhausted
            yield counter() # yields the fillvalue, or raises IndexError
        fillers = _repeat(fillvalue)
        iters = [_chain(it, sentinel(), fillers) for it in args]
        try:
            for tup in izip(*iters):
                yield tup
        except IndexError:
            pass
class OrderedDict(dict):
    """Dictionary that remembers insertion order.

    Backport of the Python 2.7 collections.OrderedDict API for older
    interpreters: the inherited dict gives O(1) item access while a
    parallel ``self._keys`` list drives iteration order.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if not hasattr(self, '_keys'):
            self._keys = []
        self.update(*args, **kwds)

    def clear(self):
        del self._keys[:]
        dict.clear(self)

    def __setitem__(self, key, value):
        # new keys go to the end; existing keys keep their position
        if key not in self:
            self._keys.append(key)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)

    def __iter__(self):
        return iter(self._keys)

    def __reversed__(self):
        return reversed(self._keys)

    def popitem(self):
        """Remove and return the most recently inserted (key, value) pair."""
        if not self:
            raise KeyError('dictionary is empty')
        key = self._keys.pop()
        value = dict.pop(self, key)
        return key, value

    def __reduce__(self):
        # pickle as (class, items) so order survives a round trip
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        inst_dict.pop('_keys', None)
        return (self.__class__, (items,), inst_dict)

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def update(self, other=(), **kwds):
        if hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __marker = object()

    def pop(self, key, default=__marker):
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def keys(self):
        return list(self)

    def values(self):
        return [self[key] for key in self]

    def items(self):
        return [(key, self[key]) for key in self]

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # order-sensitive against another OrderedDict, order-insensitive
        # against a plain dict (matches the 2.7 semantics)
        if isinstance(other, OrderedDict):
            return all(p==q for p, q in _zip_longest(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        # BUG FIX: without this, `!=` fell back to dict.__ne__ and could
        # disagree with the order-sensitive __eq__ above.
        return not self == other

# End class OrderedDict
|
gateway4labs/labmanager | alembic/versions/105c1c44ff70_login_is_not_nullabl.py | Python | bsd-2-clause | 832 | 0.014423 | """login is not nullable
Revision ID: 105c1c44ff70
Revises: 2003c675a267
Create Date: 201 | 3-12-09 10:52:50.646000
"""
# revisi | on identifiers, used by Alembic.
revision = '105c1c44ff70'
down_revision = '2003c675a267'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('labmanager_users', 'login',
existing_type=mysql.VARCHAR(length=50),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('labmanager_users', 'login',
existing_type=mysql.VARCHAR(length=50),
nullable=True)
### end Alembic commands ###
|
tvaddonsco/tva-release-repo | matrix/plugin.video.realizerx/resources/lib/modules/pyperclip/clipboards.py | Python | gpl-3.0 | 3,970 | 0 | import sys
import subprocess
from .exceptions import PyperclipException
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
PY2 = sys.version_info[0] == 2
text_type = str if PY2 else str
def init_osx_clipboard():
def copy_osx(text):
p = subprocess.Popen(['pbcopy', 'w'],
stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text)
def paste_osx():
p = subprocess.Popen(['pbpaste', 'r'],
stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout
return copy_osx, paste_osx
def init_gtk_clipboard():
    """Return (copy, paste) callables backed by GTK's clipboard.

    Requires the Python 2 `gtk` bindings; imported lazily so merely
    loading this module does not need GTK.
    """
    import gtk
    def copy_gtk(text):
        # module-level global -- presumably keeps the Clipboard object
        # alive after this call returns so the stored text persists;
        # TODO(review): confirm against GTK clipboard semantics
        global cb
        cb = gtk.Clipboard()
        cb.set_text(text)
        cb.store()
    def paste_gtk():
        clipboardContents = gtk.Clipboard().wait_for_text()
        # for python 2, returns None if the clipboard is blank.
        if clipboardContents is None:
            return ''
        else:
            return clipboardContents
    return copy_gtk, paste_gtk
def init_qt_clipboard():
    """Return (copy, paste) callables backed by the Qt4 clipboard.

    Instantiates a QApplication up front (required before any clipboard
    access); the closures share it.
    """
    # $DISPLAY should exist
    from PyQt4.QtGui import QApplication
    app = QApplication([])
    def copy_qt(text):
        cb = app.clipboard()
        cb.setText(text)
    def paste_qt():
        cb = app.clipboard()
        # text() returns a QString; coerce to the platform text type
        return text_type(cb.text())
    return copy_qt, paste_qt
def init_xclip_clipboard():
    """Return (copy, paste) callables that shell out to `xclip`."""
    def copy_xclip(text):
        # write into the CLIPBOARD selection
        proc = subprocess.Popen(['xclip', '-selection', 'c'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text)

    def paste_xclip():
        # -o dumps the CLIPBOARD selection to stdout
        proc = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out

    return copy_xclip, paste_xclip
def init_xsel_clipboard():
    """Return (copy, paste) callables that shell out to `xsel`."""
    def copy_xsel(text):
        # -b selects the CLIPBOARD buffer, -i reads from stdin
        proc = subprocess.Popen(['xsel', '-b', '-i'],
                                stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text)

    def paste_xsel():
        # -o writes the CLIPBOARD buffer to stdout
        proc = subprocess.Popen(['xsel', '-b', '-o'],
                                stdout=subprocess.PIPE, close_fds=True)
        out, _ = proc.communicate()
        return out

    return copy_xsel, paste_xsel
def init_klipper_clipboard():
    """Return (copy, paste) callables for KDE's Klipper daemon (via qdbus)."""
    def copy_klipper(text):
        p = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
             text],
            stdin=subprocess.PIPE, close_fds=True)
        p.communicate(input=None)

    def paste_klipper():
        p = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
            stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        # Klipper appends one trailing newline to the contents; strip it.
        # (BUG FIX: the previous `assert len(...) > 0` crashed on a
        # legitimately empty clipboard, duplicated the endswith check, and
        # vanished entirely under `python -O`.)
        clipboardContents = stdout
        if clipboardContents.endswith('\n'):
            clipboardContents = clipboardContents[:-1]
        return clipboardContents

    return copy_klipper, paste_klipper
def init_no_clipboard():
    """Return stub (copy, paste) objects used when no clipboard mechanism
    is available: both are falsy, and calling either raises
    PyperclipException with a pointer to the docs."""
    class ClipboardUnavailable(object):
        def __call__(self, *args, **kwargs):
            # fail loudly only when the clipboard is actually used
            raise PyperclipException(EXCEPT_MSG)

        # falsy so callers can probe support with `if copy:`
        if PY2:
            def __nonzero__(self):
                return False
        else:
            def __bool__(self):
                return False

    copy_stub = ClipboardUnavailable()
    paste_stub = ClipboardUnavailable()
    return copy_stub, paste_stub
|
projectatomic/atomic-reactor | tests/plugins/test_bump_release.py | Python | bsd-3-clause | 24,769 | 0.002059 | """
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
import koji
import os
from copy import deepcopy
from textwrap import dedent
from atomic_reactor.plugins.pre_bump_release import BumpReleasePlugin
from atomic_reactor.plugins.pre_check_and_set_rebuild import CheckAndSetRebuildPlugin
from atomic_reactor.plugins.pre_fetch_sources import PLUGIN_FETCH_SOURCES_KEY
from atomic_reactor.plugins.pre_reactor_config import (ReactorConfigPlugin,
WORKSPACE_CONF_KEY,
ReactorConfig)
from atomic_reactor.util import df_parser
from atomic_reactor.constants import PROG
from tests.util import add_koji_map_in_workflow
from flexmock import flexmock
import time
import pytest
# NVR reported by the mocked fetch_sources plugin result (see prepare())
KOJI_SOURCE_NVR = "sources_nvr"
class MockedClientSessionGeneral(object):
    """Bare-bones stand-in for koji.ClientSession used by these tests."""

    def __init__(self, hub, opts=None):
        # signature-compatible with koji.ClientSession; nothing to set up
        pass

    def getBuild(self, build_info):
        # behave as if no build exists in Koji
        return None

    def krb_login(self, *args, **kwargs):
        # authentication always "succeeds"
        return True
class MockSource(object):
    """Minimal stand-in for an atomic-reactor source object backed by a
    py.path tmpdir."""

    def __init__(self, tmpdir, add_timestamp=None):
        self.dockerfile_path = str(tmpdir.join('Dockerfile'))
        self.path = str(tmpdir)
        self.commit_id = None
        # only expose the autorebuild flag when the caller asked for it
        if add_timestamp is None:
            autorebuild = {}
        else:
            autorebuild = {'add_timestamp_to_release': add_timestamp}
        self.config = flexmock(autorebuild=autorebuild)
class TestBumpRelease(object):
def prepare(self,
| tmpdir,
labels=None,
include_target=True,
certs=False,
append=False,
reserve_build=False,
is_auto=False,
add_timestamp=None,
fetch_source=False,
scratch=None):
if labels is None:
labels = {}
workflow = flexmock()
setattr(workflow, 'builder', flexmock())
setattr(workflow, 'plugin_workspace', {})
setattr(workflow, 'reserved_build_id', None)
setattr(workflow, 'reserved_token ', None)
setattr(workflow, 'source', MockSource(tmpdir, add_timestamp))
setattr(workflow, 'prebuild_results', {CheckAndSetRebuildPlugin.key: is_auto})
setattr(workflow, 'user_params', {})
if scratch is not None:
workflow.user_params['scratch'] = scratch
if fetch_source:
workflow.prebuild_results[PLUGIN_FETCH_SOURCES_KEY] = {
'sources_for_nvr': KOJI_SOURCE_NVR
}
df = tmpdir.join('Dockerfile')
df.write('FROM base\n')
for key, value in labels.items():
df.write('LABEL {key}={value}\n'.format(key=key, value=value), mode='a')
setattr(workflow.builder, 'df_path', str(df))
kwargs = {
'tasker': None,
'workflow': workflow,
}
if include_target:
kwargs['target'] = 'foo'
if append:
kwargs['append'] = True
if certs:
tmpdir.join('cert').write('cert')
tmpdir.join('serverca').write('serverca')
workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
ReactorConfig({'version': 1})
add_koji_map_in_workflow(workflow, hub_url='', root_url='',
reserve_build=reserve_build,
ssl_certs_dir=str(tmpdir) if certs else None)
plugin = BumpReleasePlugin(**kwargs)
return plugin
    def test_component_missing(self, tmpdir):
        """The plugin must fail when the Dockerfile carries no component label."""
        # replace koji.ClientSession with the no-op mock for this test
        session = MockedClientSessionGeneral('')
        flexmock(koji, ClientSession=session)
        plugin = self.prepare(tmpdir)
        with pytest.raises(RuntimeError):
            plugin.run()
    @pytest.mark.parametrize(('reserve_build', 'koji_build_status', 'init_fails'), [
        (True, 'COMPLETE', True),
        (True, 'FAILED', True),
        (True, 'CANCELED', True),
        (True, 'COMPLETE', False),
        (True, 'FAILED', False),
        (True, 'CANCELED', False),
        (False, 'COMPLETE', False),
        (False, 'FAILED', False),
        (False, 'CANCELED', False),
    ])
    @pytest.mark.parametrize('add_timestamp', [True, False])
    @pytest.mark.parametrize('is_auto', [True, False])
    @pytest.mark.parametrize('scratch', [True, False])
    @pytest.mark.parametrize('build_exists', [True, False])
    @pytest.mark.parametrize('release_label', [
        'release',
        'Release',
    ])
    @pytest.mark.parametrize('user_provided_relese', [True, False])
    def test_release_label_already_set(self, tmpdir, caplog, reserve_build, koji_build_status,
                                       init_fails, add_timestamp, is_auto, scratch,
                                       build_exists, release_label, user_provided_relese):
        """When the release label is already set, the plugin must not bump it,
        and build reservation / existing-build conflicts must be reported."""
        # per-test koji session mock; closes over the parametrized flags
        class MockedClientSession(object):
            def __init__(self, hub, opts=None):
                pass
            def getBuild(self, build_info):
                if build_exists:
                    return {'id': 12345, 'state': koji.BUILD_STATES[koji_build_status]}
                return build_exists
            def krb_login(self, *args, **kwargs):
                return True
            def CGInitBuild(self, cg_name, nvr_data):
                if init_fails:
                    raise koji.GenericError('unable to pre-declare build {}'.format(nvr_data))
                return {'build_id': 'reserved_build', 'token': 'reserved_token'}
        session = MockedClientSession('')
        flexmock(koji, ClientSession=session)
        plugin = self.prepare(tmpdir, labels={release_label: '1',
                                              'com.redhat.component': 'component',
                                              'version': 'version'},
                              add_timestamp=add_timestamp, is_auto=is_auto,
                              reserve_build=reserve_build,
                              scratch=scratch)
        if user_provided_relese:
            plugin.workflow.user_params['release'] = 'release_provided'
        # a non-COMPLETE reserved build gets "refunded" rather than conflicting
        refund_build = (reserve_build and koji_build_status != 'COMPLETE')
        if build_exists and not scratch and not refund_build:
            with pytest.raises(RuntimeError) as exc:
                plugin.run()
            assert 'build already exists in Koji: ' in str(exc.value)
            return
        if reserve_build and init_fails and not scratch:
            with pytest.raises(RuntimeError) as exc:
                plugin.run()
            assert 'unable to pre-declare build ' in str(exc.value)
            return
        plugin.run()
        if not user_provided_relese:
            timestamp_msg = 'autorebuild with add_timestamp_to_release and release ' \
                            'set explicitly, appending timestamp:'
            if is_auto and add_timestamp:
                assert timestamp_msg in caplog.text
            else:
                assert 'not incrementing' in caplog.text
@pytest.mark.parametrize(('labels', 'all_wrong_labels'), [
({'com.redhat.component': 'component'},
{'version': 'missing'}),
({'BZComponent': 'component'},
{'version': 'missing'}),
({'version': 'version'},
{'com.redhat.component': 'missing'}),
({'Version': 'version'},
{'com.redhat.component': 'missing'}),
({},
{'com.redhat.component': 'missing', 'version': 'missing'}),
({'com.redhat.component': 'component', 'version': ''},
{'version': 'empty'}),
({'com.redhat.component': 'component', 'version': '$UNDEFINED'},
{'version': 'empty'}),
({'com.redhat.component': 'component', 'version': 'version', 'release': ''},
{'release': 'empty'}),
({'com.redhat.component': 'component', 'version': 'version', 'release': '$UNDEFINED'},
{'release': 'empty'}),
])
|
unintended/Cohen | tests/test_dbus.py | Python | mit | 3,086 | 0.000648 | # -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
"""
Test cases for L{dbus_service}
"""
import os
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from coherence import __version__
from coherence.base import Coherence
from coherence.upnp.core import uuid
import coherence.extern.louie as louie
from tests import wrapped
try:
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
import dbus.service
except ImportError:
dbus = None
BUS_NAME = 'org.Coherence'
OBJECT_PATH = '/org/Coherence'
class TestDBUS(unittest.TestCase):
    """Exercise the Coherence D-Bus service (version call, plugin add/remove)."""
    # trial's class-level ``skip`` attribute: bail out when dbus bindings are
    # missing or the wrong twisted reactor is installed.
    if not dbus:
        skip = "Python dbus-bindings not available."
    elif reactor.__class__.__name__ != 'Glib2Reactor':
        skip = ("This test needs a Glib2Reactor, please start trial "
                "with the '-r glib2' option.")
    def setUp(self):
        # Reset the louie signal dispatcher so signals from earlier tests
        # cannot leak into this one.
        louie.reset()
        self.coherence = Coherence({'unittest': 'yes', 'logmode': 'error', 'use_dbus': 'yes', 'controlpoint': 'yes'})
        self.bus = dbus.SessionBus()
        self.coherence_service = self.bus.get_object(BUS_NAME, OBJECT_PATH)
        # Pre-generated uuid used by the add/remove plugin test.
        self.uuid = str(uuid.UUID())
    def tearDown(self):
        def cleaner(r):
            # Clear state only after shutdown has finished; pass the result on.
            self.coherence.clear()
            return r
        dl = self.coherence.shutdown()
        dl.addBoth(cleaner)
        # Returning the deferred makes trial wait for shutdown to complete.
        return dl
    def test_dbus_version(self):
        """ tests the version number request via dbus
        """
        d = Deferred()
        @wrapped(d)
        def handle_version_reply(version):
            # The service must report the package version verbatim.
            self.assertEqual(version, __version__)
            d.callback(version)
        self.coherence_service.version(dbus_interface=BUS_NAME,
                                       reply_handler=handle_version_reply,
                                       error_handler=d.errback)
        return d
    def test_dbus_plugin_add_and_remove(self):
        """ tests creation and removal of a backend via dbus
        """
        d = Deferred()
        @wrapped(d)
        def add_it(uuid):
            self.coherence_service.add_plugin(
                'SimpleLight', {'name': 'dbus-test-light-%d' % os.getpid(), 'uuid': uuid},
                dbus_interface=BUS_NAME,
                reply_handler=handle_add_plugin_reply,
                error_handler=d.errback)
        @wrapped(d)
        def handle_add_plugin_reply(uuid):
            # The service echoes the uuid of the plugin it created.
            self.assertEqual(self.uuid, uuid)
            # Small delay so the backend is fully up before removal.
            reactor.callLater(2, remove_it, uuid)
        @wrapped(d)
        def remove_it(uuid):
            self.coherence_service.remove_plugin(
                uuid,
                dbus_interface=BUS_NAME,
                reply_handler=handle_remove_plugin_reply,
                error_handler=d.errback)
        @wrapped(d)
        def handle_remove_plugin_reply(uuid):
            self.assertEqual(self.uuid, uuid)
            d.callback(uuid)
        add_it(self.uuid)
        return d
|
sahat/wagtail | wagtail/wagtaildocs/wagtail_hooks.py | Python | bsd-3-clause | 282 | 0.003546 | from django.conf.urls import inclu | de, url
from wagtail.wagtailadmin import hooks
from wagtail.wagtaildocs import admin_urls
def register_admin_urls():
    """Mount the wagtaildocs admin URLconf under the /documents/ prefix."""
    urlpatterns = [url(r'^documents/', include(admin_urls))]
    return urlpatterns
hooks.register('register_admin_urls', register_admin_urls)
|
myarjunar/QGIS | python/plugins/processing/algs/qgis/JoinAttributes.py | Python | gpl-2.0 | 4,402 | 0.000454 | # -*- coding: utf-8 -*-
"""
***************************************************************************
JoinAttributes.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class JoinAttributes(GeoAlgorithm):
    """Join the attribute table of a second layer onto a vector layer.

    For every feature of the input layer, the value of TABLE_FIELD is looked
    up among the TABLE_FIELD_2 values of the second layer; on a match the
    second layer's attributes are appended to the feature's own attributes.
    Only the *first* feature per join key in layer 2 is used.
    """

    OUTPUT_LAYER = 'OUTPUT_LAYER'
    INPUT_LAYER = 'INPUT_LAYER'
    INPUT_LAYER_2 = 'INPUT_LAYER_2'
    TABLE_FIELD = 'TABLE_FIELD'
    TABLE_FIELD_2 = 'TABLE_FIELD_2'

    def defineCharacteristics(self):
        """Declare name, group, input parameters and the output layer."""
        self.name, self.i18n_name = self.trAlgorithm('Join attributes table')
        self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
        self.addParameter(ParameterVector(self.INPUT_LAYER,
                                          self.tr('Input layer')))
        self.addParameter(ParameterTable(self.INPUT_LAYER_2,
                                         self.tr('Input layer 2'), False))
        self.addParameter(ParameterTableField(self.TABLE_FIELD,
                                              self.tr('Table field'), self.INPUT_LAYER))
        self.addParameter(ParameterTableField(self.TABLE_FIELD_2,
                                              self.tr('Table field 2'), self.INPUT_LAYER_2))
        self.addOutput(OutputVector(self.OUTPUT_LAYER,
                                    self.tr('Joined layer')))

    def processAlgorithm(self, feedback):
        """Cache layer-2 attributes by join value, then stream the join.

        :param feedback: processing feedback object used to report progress.
        """
        # Renamed from ``input``/``input2`` to avoid shadowing the builtin.
        input_uri = self.getParameterValue(self.INPUT_LAYER)
        input2_uri = self.getParameterValue(self.INPUT_LAYER_2)
        output = self.getOutputFromName(self.OUTPUT_LAYER)
        field = self.getParameterValue(self.TABLE_FIELD)
        field2 = self.getParameterValue(self.TABLE_FIELD_2)
        layer = dataobjects.getObjectFromUri(input_uri)
        joinField1Index = layer.fields().lookupField(field)
        layer2 = dataobjects.getObjectFromUri(input2_uri)
        joinField2Index = layer2.fields().lookupField(field2)
        outFields = vector.combineVectorFields(layer, layer2)
        writer = output.getVectorWriter(outFields, layer.wkbType(),
                                        layer.crs())
        # Cache attributes of Layer 2, keyed by the stringified join value.
        # First occurrence wins (first-match join semantics).
        cache = {}
        features = vector.features(layer2)
        # Guard empty layers: the loop would not run anyway, but computing
        # 100.0 / len(features) would raise ZeroDivisionError.
        total = 100.0 / len(features) if features else 0
        for current, feat in enumerate(features):
            attrs = feat.attributes()
            joinValue2 = str(attrs[joinField2Index])
            if joinValue2 not in cache:
                cache[joinValue2] = attrs
            feedback.setProgress(int(current * total))
        # Write the output layer with the joined attributes appended; an
        # unmatched feature keeps only its own attributes.
        outFeat = QgsFeature()
        features = vector.features(layer)
        total = 100.0 / len(features) if features else 0
        for current, feat in enumerate(features):
            outFeat.setGeometry(feat.geometry())
            attrs = feat.attributes()
            joinValue1 = str(attrs[joinField1Index])
            attrs.extend(cache.get(joinValue1, []))
            outFeat.setAttributes(attrs)
            writer.addFeature(outFeat)
            feedback.setProgress(int(current * total))
        del writer
|
rossplt/ross-django-utils | ross/settings.py | Python | mit | 3,090 | 0.001294 | """
Django settings for ross project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; any non-development
# deployment should load it from the environment instead, e.g.
# os.environ['DJANGO_SECRET_KEY'].
SECRET_KEY = 'jtn=n8&nq9jgir8_z1ck40^c1s22d%=)z5qsm*q(bku*_=^sg&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib | .messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware' | ,
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ross.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ross.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
dhhxu/LBNL-ion7350-interface | setup.py | Python | gpl-2.0 | 3,243 | 0.002158 | """
Setup script
The ION 7350 interface requires several directories and files to work. If the
interface is to be run on a different machine, run this script to set up the
requisite directories and files.
"""
import os
from getpass import getpass
from LbnlIon7350Interface.utils import defaults
from LbnlIon7350Interface.utils import utils
def safe_mkdir(path):
    """
    Ensure that a directory exists at path.

    Creation is attempted unconditionally; if it fails because path is
    already a directory, that is treated as success. Any other failure
    (invalid name, path exists as a regular file) raises a ValueError.

    Params:
        path string
    """
    try:
        os.makedirs(path)
    except OSError:
        if os.path.isdir(path):
            return
        raise ValueError('Bad directory name')
def get_username():
    """
    Prompt user for username. Returns the entered name.
    """
    # NOTE: raw_input is Python 2 only (consistent with the 0400 octal
    # literal used in make_creds_file below).
    return raw_input("Username: ")
def get_password():
    """
    Prompt the user for a password, entered twice for confirmation.

    Retries until two non-empty, matching entries are given. The prompt can
    be interrupted (KeyboardInterrupt); handling that interrupt is the
    caller's responsibility.

    Returns the entered password.
    """
    while True:
        pass1 = getpass("Password: ")
        pass2 = getpass("Enter password again: ")
        if not pass1 or not pass2:
            print('No password entered. Please try again.')
            continue
        if pass1 != pass2:
            # Fixed user-facing typo: 'Pease' -> 'Please'.
            print('Passwords do not match. Please try again.')
            continue
        # Both entries are non-empty and equal at this point.
        return pass1
def make_creds_file(root):
    """
    Prompt the user for user name and password to create a read-only file
    containing login credentials. If the user cancels the operation anytime,
    raises an IOError and no file is created.
    root is the root directory of the interface program. It is needed so that
    the credentials file is saved in the correct location.
    The file contains two lines. The first is username. The second is password.
    Params:
        root string
    """
    path = defaults.creds(root)
    if utils.exists_file(path):
        # An existing credentials file is only replaced with explicit consent.
        while True:
            answer = raw_input('Credentials file found. Do you want to overwrite (Y/N)? ')
            if answer.lower() == 'y':
                os.remove(path)
                break
            elif answer.lower() == 'n':
                return
            else:
                print('Unknown input. Please try again.')
    try:
        user = get_username()
        pwd = get_password()
    except KeyboardInterrupt:
        # Translate Ctrl-C during the prompts into the documented IOError.
        raise IOError('User cancelled operation')
    with open(path, 'wb') as cf:
        cf.write('%s\n%s\n' % (user, pwd))
    # 0400 (Python 2 octal literal): owner read-only, so the credentials
    # cannot be modified and are unreadable by group/other.
    os.chmod(path, 0400)
def main():
    """Create the interface's data/log directories and the credentials file."""
    # Anchor all paths at the directory containing this script.
    root = os.path.dirname(os.path.realpath(__file__))
    try:
        safe_mkdir(os.path.join(root, 'downloaded_data/archive'))
        safe_mkdir(os.path.join(root, 'json_data/archive'))
        log_dir = os.path.join(root, 'logs')
        safe_mkdir(log_dir)
        # Touch the log file so later appends always find it present.
        log_file = open(os.path.join(log_dir, "ion7350interface.log"), 'a+')
        log_file.close()
        make_creds_file(root)
    except ValueError as err:
        # Raised by safe_mkdir on a bad directory name.
        utils.error(str(err))
    except IOError as ierr:
        # Raised by make_creds_file when the user cancels.
        utils.error(str(ierr))
main()
|
nicholasserra/sentry | src/sentry/web/frontend/auth_logout.py | Python | bsd-3-clause | 457 | 0 | from __future__ import absolute_import
from django.contrib.auth import logout
from django.contrib.auth.models import AnonymousUser
from sentry.web.frontend.base import BaseView
from sentry.utils.auth import get_login_redirect
class AuthLogoutView(BaseView):
    """Log the current user out, then redirect to the login target."""
    # Logging out must also work for visitors who are not authenticated.
    auth_required = False
    def handle(self, request):
        # Compute the redirect first, while the session still exists.
        rv = get_login_redirect(request)
        logout(request)
        # Ensure downstream code sees an anonymous user on this request.
        request.user = AnonymousUser()
        return self.redirect(rv)
|
Mandrilux/GOC_2017 | api/polydevs/parking/migrations/0008_auto_20170311_2109.py | Python | agpl-3.0 | 1,364 | 0.002199 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-11 21:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3 makemigrations (see file header).
    # The AlterField operations re-declare choices/defaults so the schema
    # matches the current parking.models definitions.
    dependencies = [
        ('parking', '0007_auto_20170311_2028'),
    ]
    operations = [
        migrations.AlterField(
            model_name='parking',
            name='book_for',
            field=models.CharField(choices=[('Disabled', 'Disabled'), ('Default', 'Default'), ('Electric', 'Electric')], default='Default', max_length=8),
        ),
        migrations.AlterField(
            model_name='parking',
            name='is_free',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='parking',
            name='is_paying',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='parking',
            name='size',
            field=models.CharField(choices=[('Small', 'Small'), ('Medium', 'Medium'), ('Large', 'Large')], default='Medium', max_length=6),
        ),
        migrations.AlterField(
            model_name='parking',
            name='vehicle_type',
            field=models.CharField(choices=[('Car', 'Car'), ('MotoCycle', 'MotoCycle'), ('Truck', 'Truck')], default='Car', max_length=9),
        ),
    ]
|
esthermm/odoo-addons | sale_service_recurrence_configurator/models/sale.py | Python | agpl-3.0 | 7,124 | 0 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class SaleOrder(models.Model):
    """Extend sale.order so quote templates propagate recurrence flags."""
    _inherit = 'sale.order'

    @api.multi
    def onchange_template_id(self, template_id, partner=False,
                             fiscal_position=False):
        """Extend the template onchange to copy month/week/day flags from
        the matching sale.quote.line onto each generated order line."""
        res = super(SaleOrder, self).onchange_template_id(
            template_id, partner=partner, fiscal_position=fiscal_position)
        if (template_id and res.get('value', False) and
                res.get('value')['order_line']):
            res = self._catch_month_week_day_information(template_id, res)
        return res

    def _catch_month_week_day_information(self, template_id, res):
        """For each generated order line, find the quote line it came from
        and copy that line's recurrence information into it."""
        quote_obj = self.env['sale.quote.line']
        order_lines = res.get('value')['order_line']
        for line in order_lines:
            if len(line) > 1:
                dic = line[2]
                # Build a search domain from every value the onchange set,
                # to pinpoint the originating quote line.
                cond = [('quote_id', '=', template_id)]
                price_unit = dic.get('price_unit', False)
                if price_unit:
                    cond.append(('price_unit', '=', price_unit))
                discount = dic.get('discount', False)
                if discount:
                    cond.append(('discount', '=', discount))
                product_uom_qty = dic.get('product_uom_qty', False)
                if product_uom_qty:
                    cond.append(('product_uom_qty', '=', product_uom_qty))
                product_id = dic.get('product_id', False)
                if product_id:
                    cond.append(('product_id', '=', product_id))
                product_uom = dic.get('product_uom', False)
                if product_uom:
                    cond.append(('product_uom_id', '=', product_uom))
                website_description = dic.get('website_description', False)
                if website_description:
                    cond.append(('website_description', '=',
                                 website_description))
                template = quote_obj.search(cond)
                if len(template) > 1:
                    # Ambiguous match: fall back to template-only lines
                    # (product_template set, no variant) matched by name.
                    cond = [('quote_id', '=', template_id),
                            ('product_template', '!=', False),
                            ('product_id', '=', False),
                            ('name', '=', dic.get('name'))]
                    template = quote_obj.search(cond, limit=1)
                line = self._sale_line_with_sale_quote_information(
                    template, line)
                if template.product_id:
                    line[2].update({'product_id': template.product_id.id})
        return res

    @api.multi
    def _sale_line_with_sale_quote_information(self, template, line):
        """Copy the recurrence flags of ``template`` into the order line's
        value dict (``line[2]``) and return the line."""
        line[2].update({
            'january': template.january,
            'february': template.february,
            'march': template.march,
            'april': template.april,
            'may': template.may,
            'june': template.june,
            'july': template.july,
            'august': template.august,
            'september': template.september,
            # BUGFIX: 'october' was missing from this mapping, so the
            # October flag of the quote line was never propagated even
            # though both models declare an ``october`` field.
            'october': template.october,
            'november': template.november,
            'december': template.december,
            'week1': template.week1,
            'week2': template.week2,
            'week3': template.week3,
            'week4': template.week4,
            'week5': template.week5,
            'week6': template.week6,
            'monday': template.monday,
            'tuesday': template.tuesday,
            'wednesday': template.wednesday,
            'thursday': template.thursday,
            'friday': template.friday,
            'saturday': template.saturday,
            'sunday': template.sunday})
        return line
class SaleOrderLine(models.Model):
    """Add recurrence scheduling flags to sale order lines."""
    _inherit = 'sale.order.line'
    # Mirrors the product's flag: marks lines whose service recurs.
    recurring_service = fields.Boolean(
        string='Recurring Service', related='product_id.recurring_service')
    # Months in which the recurring service applies.
    january = fields.Boolean('January')
    february = fields.Boolean('February')
    march = fields.Boolean('March')
    april = fields.Boolean('April')
    may = fields.Boolean('May')
    june = fields.Boolean('June')
    july = fields.Boolean('July')
    august = fields.Boolean('August')
    september = fields.Boolean('September')
    october = fields.Boolean('October')
    november = fields.Boolean('November')
    december = fields.Boolean('December')
    # Weeks of the month in which the service applies.
    week1 = fields.Boolean('Week 1')
    week2 = fields.Boolean('Week 2')
    week3 = fields.Boolean('Week 3')
    week4 = fields.Boolean('Week 4')
    week5 = fields.Boolean('Week 5')
    week6 = fields.Boolean('Week 6')
    # Days of the week on which the service applies.
    monday = fields.Boolean('Monday')
    tuesday = fields.Boolean('Tuesday')
    wednesday = fields.Boolean('Wednesday')
    thursday = fields.Boolean('Thursday')
    friday = fields.Boolean('Friday')
    saturday = fields.Boolean('Saturday')
    sunday = fields.Boolean('Sunday')
class SaleQuoteLine(models.Model):
    """Quote-template lines: allow template-only products and carry the
    same recurrence flags that are copied onto sale order lines."""
    _inherit = 'sale.quote.line'
    # product_id becomes optional so a line can reference just a template.
    product_id = fields.Many2one(required=False)
    product_template = fields.Many2one(
        comodel_name='product.template', string='Product Template')
    # Months in which the recurring service applies.
    january = fields.Boolean('January')
    february = fields.Boolean('February')
    march = fields.Boolean('March')
    april = fields.Boolean('April')
    may = fields.Boolean('May')
    june = fields.Boolean('June')
    july = fields.Boolean('July')
    august = fields.Boolean('August')
    september = fields.Boolean('September')
    october = fields.Boolean('October')
    november = fields.Boolean('November')
    december = fields.Boolean('December')
    # Weeks of the month in which the service applies.
    week1 = fields.Boolean('Week 1')
    week2 = fields.Boolean('Week 2')
    week3 = fields.Boolean('Week 3')
    week4 = fields.Boolean('Week 4')
    week5 = fields.Boolean('Week 5')
    week6 = fields.Boolean('Week 6')
    # Days of the week on which the service applies.
    monday = fields.Boolean('Monday')
    tuesday = fields.Boolean('Tuesday')
    wednesday = fields.Boolean('Wednesday')
    thursday = fields.Boolean('Thursday')
    friday = fields.Boolean('Friday')
    saturday = fields.Boolean('Saturday')
    sunday = fields.Boolean('Sunday')
    @api.multi
    @api.onchange('product_template')
    def onchange_product_template(self):
        """Sync uom/name from the chosen template; auto-pick the single
        variant when the template has no attribute lines."""
        self.ensure_one()
        if not self.product_template:
            self.product_id = False
        else:
            self.product_uom_id = self.product_template.uom_id.id
            self.name = self.product_template.name
            if not self.product_template.attribute_line_ids:
                self.product_id = (
                    self.product_template.product_variant_ids and
                    self.product_template.product_variant_ids[0])
        # Restrict variant choices to the selected template.
        return {'domain': {'product_id': [('product_tmpl_id', '=',
                                           self.product_template.id)]}}
    @api.multi
    def on_change_product_id(self, product):
        """Keep product_template in sync when a variant is chosen."""
        result = super(SaleQuoteLine, self).on_change_product_id(product)
        if 'value' in result and product:
            prod = self.env['product.product'].browse(product)
            result['value']['product_template'] = prod.product_tmpl_id.id
        return result
|
python-bonobo/bonobo | bonobo/nodes/basics.py | Python | apache-2.0 | 10,187 | 0.00265 | import functools
import html
import itertools
import pprint
from mondrian import term
from bonobo import settings
from bonobo.config import Configurable, Method, Option, use_context, use_no_input, use_raw_input
from bonobo.config.functools import transformation_factory
from bonobo.config.processors import ContextProcessor, use_context_processor
from bonobo.constants import NOT_MODIFIED
from bonobo.errors import UnrecoverableAttributeError
from bonobo.util.objects import ValueHolder
from bonobo.util.term import CLEAR_EOL
__all__ = [
"FixedWindow",
"Format",
"Limit",
"OrderFields",
"MapFields",
"PrettyPrinter",
"Rename",
"SetFields",
"Tee",
"UnpackItems",
"count",
"identity",
"noop",
]
def identity(x):
    """Identity transformation: return the input unchanged."""
    return x
class Limit(Configurable):
    """
    Creates a Limit() node, that will only let go through the first n rows (defined by the `limit` option), unmodified.
    .. attribute:: limit
        Number of rows to let go through.
    TODO: simplify into a closure building factory?
    """
    limit = Option(positional=True, default=10)
    @ContextProcessor
    def counter(self, context):
        # Per-execution mutable counter, injected as the first __call__ arg.
        yield ValueHolder(0)
    def __call__(self, counter, *args, **kwargs):
        counter += 1  # ValueHolder supports in-place add
        if counter <= self.limit:
            # Forward the row unchanged; rows past the limit are dropped.
            yield NOT_MODIFIED
def Tee(f):
    """
    Decorate *f* so it acts as a pass-through ("tee") node: the wrapped
    callable runs *f* for its side effects, then forwards the input row
    unchanged by returning NOT_MODIFIED.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        # The original declared ``nonlocal f`` here, which is unnecessary:
        # ``f`` is only read, never rebound, so the closure already sees it.
        f(*args, **kwargs)
        return NOT_MODIFIED
    return wrapped
def _shorten(s, w):
if w and len(s) > w:
s = s[0 : w - 3] + "..."
return s
class PrettyPrinter(Configurable):
    """Pass-through node that pretty-prints each row, choosing an output
    format for jupyter, interactive terminals, or plain (quiet) streams."""
    max_width = Option(
        int,
        default=term.get_size()[0],
        required=False,
        __doc__="""
        If set, truncates the output values longer than this to this width.
        """,
    )
    filter = Method(
        default=(
            lambda self, index, key, value: (value is not None)
            and (not isinstance(key, str) or not key.startswith("_"))
        ),
        __doc__="""
        A filter that determine what to print.
        Default is to ignore any key starting with an underscore and none values.
        """,
    )
    @ContextProcessor
    def context(self, context):
        # Accumulate jupyter HTML rows during execution; render once at teardown.
        context.setdefault("_jupyter_html", None)
        yield context
        if context._jupyter_html is not None:
            from IPython.display import display, HTML
            display(HTML("\n".join(["<table>"] + context._jupyter_html + ["</table>"])))
    def __call__(self, context, *args, **kwargs):
        # Rows always pass through unchanged; printing is a side effect.
        if not settings.QUIET:
            if term.isjupyter:
                self.print_jupyter(context, *args, **kwargs)
                return NOT_MODIFIED
            if term.istty:
                self.print_console(context, *args, **kwargs)
                return NOT_MODIFIED
            self.print_quiet(context, *args, **kwargs)
            return NOT_MODIFIED
    def print_quiet(self, context, *args, **kwargs):
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_quiet(index, key, value, fields=context.get_input_fields()))
    def format_quiet(self, index, key, value, *, fields=None):
        # XXX should we implement argnames here ?
        return " ".join(((" " if index else "-"), str(key), ":", str(value).strip()))
    def print_console(self, context, *args, **kwargs):
        # Frame each row between box-drawing corner characters.
        print("\u250c")
        for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
            if self.filter(index, key, value):
                print(self.format_console(index, key, value, fields=context.get_input_fields()))
        print("\u2514")
    def format_console(self, index, key, value, *, fields=None):
        fields = fields or []
        if not isinstance(key, str):
            # Positional value: show the field name (with dimmed index) if known.
            if len(fields) > key and str(key) != str(fields[key]):
                key = "{}{}".format(fields[key], term.lightblack("[{}]".format(key)))
            else:
                key = str(index)
        prefix = "\u2502 {} = ".format(key)
        prefix_length = len(prefix)
        def indent(text, prefix):
            # Re-indent continuation lines so multi-line reprs stay boxed.
            for i, line in enumerate(text.splitlines()):
                yield (prefix if i else "") + line + CLEAR_EOL + "\n"
        repr_of_value = "".join(
            indent(pprint.pformat(value, width=self.max_width - prefix_length), "\u2502" + " " * (len(prefix) - 1))
        ).strip()
        return "{}{}{}".format(prefix, repr_of_value.replace("\n", CLEAR_EOL + "\n"), CLEAR_EOL)
    def print_jupyter(self, context, *args):
        # First row: emit a <thead> using field names (or indexes as fallback).
        if not context._jupyter_html:
            context._jupyter_html = [
                "<thead><tr>",
                *map("<th>{}</th>".format, map(html.escape, map(str, context.get_input_fields() or range(len(args))))),
                "</tr></thead>",
            ]
        context._jupyter_html += ["<tr>", *map("<td>{}</td>".format, map(html.escape, map(repr, args))), "</tr>"]
@use_no_input
def noop(*args, **kwargs):
    """Do nothing; let every row pass through unchanged."""
    return NOT_MODIFIED
class FixedWindow(Configurable):
    """
    Transformation factory to create fixed windows of inputs, as lists.
    For example, if the input is successively 1, 2, 3, 4, etc. and you pass it through a ``FixedWindow(2)``, you'll get
    lists of elements 2 by 2: [1, 2], [3, 4], ...
    """
    length = Option(int, positional=True)  # type: int
    @ContextProcessor
    def buffer(self, context):
        buffer = yield ValueHolder([])
        # At teardown, flush a trailing partial window, padded with None
        # so every emitted window has exactly ``length`` elements.
        if len(buffer):
            last_value = buffer.get()
            last_value += [None] * (self.length - len(last_value))
            context.send(*last_value)
    @use_raw_input
    def __call__(self, buffer, bag):
        buffer.append(bag)
        # Emit and reset once the window is full.
        if len(buffer) >= self.length:
            yield tuple(buffer.get())
            buffer.set([])
@transformation_factory
def OrderFields(fields):
    """
    Transformation factory to reorder fields in a data stream.
    The given *fields* come first; any remaining input fields follow in
    sorted order.
    :param fields: iterable of field names to move to the front.
    :return: callable
    """
    fields = list(fields)
    @use_context
    @use_raw_input
    def _OrderFields(context, row):
        nonlocal fields
        context.setdefault("remaining", None)
        if not context.output_type:
            # First row only: compute leftover fields and fix the output type.
            context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
            context.set_output_fields(fields + context.remaining)
        yield tuple(row.get(field) for field in context.get_output_fields())
    return _OrderFields
@transformation_factory
def SetFields(fields):
    """
    Transformation factory that sets the field names on first iteration, without touching the values.
    :param fields: field names to assign to the stream's output type.
    :return: callable
    """
    @use_context
    @use_no_input
    def _SetFields(context):
        nonlocal fields
        # Only the first call sets the output type; values pass through as-is.
        if not context.output_type:
            context.set_output_fields(fields)
        return NOT_MODIFIED
    return _SetFields
@transformation_factory
def UnpackItems(*items, fields=None, defaults=None):
    """
    >>> UnpackItems(0)
    :param items: positions/keys in the input row whose mapping values are unpacked.
    :param fields: explicit output field names; inferred from the first row's keys when None.
    :param defaults: fallback values per field name.
    :return: callable
    """
    defaults = defaults or {}
    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        if fields is None:
            # First row: derive output fields from the unpacked mappings' keys.
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        # NOTE(review): with more than one item, each item is probed for
        # *all* accumulated fields -- confirm this cross-product is intended.
        for item in items:
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values
    return _UnpackItems
@transformation_factory
def Rename(**translations):
# XXX todo handle duplicated
fields = None
translations = {v: k for k, v in translations.items()}
@use_context
@use_raw_input
def _Rename(context, bag):
nonlocal fields, translations
if not fields:
fields = tuple(translations.get(field, field) for field in context.get_input_fields())
context.set_output_fields(fields)
return NOT_MODIFIED
|
LarsDu/DeepPixelMonster | dpixelmonster/kim_utils.py | Python | apache-2.0 | 7,612 | 0.015108 | """
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def get_image(image_path, input_height, input_width,
              resize_height=64, resize_width=64,
              is_crop=True, is_grayscale=False):
    # Load an image from disk and normalize it: optional center crop,
    # resize, and scaling of pixel values to [-1, 1] (see transform()).
    image = imread(image_path, is_grayscale)
    return transform(image, input_height, input_width,
                     resize_height, resize_width, is_crop)
def save_images(images, size, image_path):
    # Map images from [-1, 1] back to [0, 1], tile them into a
    # size[0] x size[1] grid and write the grid to image_path.
    return imsave(inverse_transform(images), size, image_path)
def imread(path, is_grayscale = False):
    # Read an image as a float array; flatten=True collapses to grayscale.
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this code
    # assumes an old SciPy with PIL available -- confirm the environment.
    if (is_grayscale):
        return scipy.misc.imread(path, flatten = True).astype(np.float)
    else:
        return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
    # NOTE(review): ``size`` is unused here; this only rescales the batch
    # from [-1, 1] to [0, 1] -- confirm whether tiling was intended.
    return inverse_transform(images)
def merge(images, size):
    """Tile a batch of (h, w, 3) images into one (rows*h, cols*w, 3) grid.

    ``size`` is (rows, cols); images fill the grid row by row in batch order.
    """
    h, w = images.shape[1], images.shape[2]
    rows, cols = size
    grid = np.zeros((h * rows, w * cols, 3))
    for idx, image in enumerate(images):
        r, c = divmod(idx, cols)
        grid[r * h:(r + 1) * h, c * w:(c + 1) * w, :] = image
    return grid
def imsave(images, size, path):
    # Tile the batch into a size[0] x size[1] grid and write it to disk.
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2.
    return scipy.misc.imsave(path, merge(images, size))
def center_crop(x, crop_h, crop_w,
                resize_h=64, resize_w=64):
    # Crop the central crop_h x crop_w window of x, then resize it to
    # (resize_h, resize_w).  crop_w defaults to crop_h (square crop).
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h)/2.))
    i = int(round((w - crop_w)/2.))
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this
    # code assumes an old SciPy with PIL installed -- confirm environment.
    return scipy.misc.imresize(
        x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
              resize_height=64, resize_width=64, is_crop=True):
    # Optionally center-crop, then resize and scale pixel values to [-1, 1].
    if is_crop:
        cropped_image = center_crop(
            image, input_height, input_width,
            resize_height, resize_width)
    else:
        cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
    # 127.5 maps uint8 values in [0, 255] onto [-1, 1].
    return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
    """Map images from the generator's [-1, 1] range back to [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
def to_json(output_path, *layers):
    # Serialize (weights, biases, batchnorm-or-None) layer triples into
    # convnet.js-style "var layer_N = {...}" JavaScript written to
    # output_path.  Tensors are .eval()'d, so this presumably requires an
    # active TensorFlow default session -- TODO confirm.
    with open(output_path, "w") as layer_f:
        lines = ""
        for w, b, bn in layers:
            # Layer index is parsed out of the variable's scope name.
            layer_idx = w.name.split('/')[0].split('h')[1]
            B = b.eval()
            if "lin/" in w.name:
                W = w.eval()
                depth = W.shape[1]
            else:
                # Deconv weights: move the channel axis to the front.
                W = np.rollaxis(w.eval(), 2, 0)
                depth = W.shape[0]
            biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]}
            if bn != None:
                gamma = bn.gamma.eval()
                beta = bn.beta.eval()
                gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]}
                beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]}
            else:
                # No batch norm: emit empty gamma/beta entries.
                gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
                beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}
            if "lin/" in w.name:
                fs = []
                for w in W.T:
                    fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]})
                lines += """
          var layer_%s = {
            "layer_type": "fc",
            "sy": 1, "sx": 1,
            "out_sx": 1, "out_sy": 1,
            "stride": 1, "pad": 0,
            "out_depth": %s, "in_depth": %s,
            "biases": %s,
            "gamma": %s,
            "beta": %s,
            "filters": %s
          };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
            else:
                fs = []
                for w_ in W:
                    fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]})
                lines += """
          var layer_%s = {
            "layer_type": "deconv",
            "sy": 5, "sx": 5,
            "out_sx": %s, "out_sy": %s,
            "stride": 2, "pad": 1,
            "out_depth": %s, "in_depth": %s,
            "biases": %s,
            "gamma": %s,
            "beta": %s,
            "filters": %s
          };""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),
               W.shape[0], W.shape[3], biases, gamma, beta, fs)
        # Strip quotes and collapse all whitespace into single spaces.
        layer_f.write(" ".join(lines.replace("'","").split()))
def make_gif(images, fname, duration=2, true_image=False):
  """Write the sequence of frames in `images` to `fname` as an animated GIF.

  Args:
    images: sequence of frames (numpy arrays).
    fname: output GIF path.
    duration: clip length in seconds.
    true_image: if True frames are already 8-bit; otherwise they are
      assumed to be in [-1, 1] and are rescaled to [0, 255].
  """
  import moviepy.editor as mpy

  def make_frame(t):
    # Map clip time t (seconds) to a frame index.
    try:
      x = images[int(len(images)/duration*t)]
    except IndexError:
      # t == duration lands one past the last frame; clamp to it.
      # (Was a bare `except:`, which also hid real errors.)
      x = images[-1]
    if true_image:
      return x.astype(np.uint8)
    else:
      # Frames are in [-1, 1]; rescale to 8-bit.
      return ((x+1)/2*255).astype(np.uint8)

  clip = mpy.VideoClip(make_frame, duration=duration)
  clip.write_gif(fname, fps = len(images) / duration)
def visualize(sess, dcgan, config, option):
  """Sample and save images from a trained DCGAN.

  `option` selects the visualization mode:
    0 -- one batch of samples from uniform random z, saved as a PNG
    1 -- sweep each z dimension across the batch; one PNG per dimension
    2 -- sweep dimensions around a random base z; GIF per dimension,
         falling back to PNG if GIF writing fails
    3 -- sweep dimensions from a zero base z; GIF per dimension
    4 -- all dimension sweeps merged into one forward-then-backward GIF

  (Two lines garbled by dataset separators -- `xrange | (100)` and
  `np.arange(0, | 1, ...)` -- are restored here.)
  """
  if option == 0:
    z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
    samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
    save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
  elif option == 1:
    values = np.arange(0, 1, 1./config.batch_size)
    for idx in xrange(100):
      print(" [*] %d" % idx)
      z_sample = np.zeros([config.batch_size, dcgan.z_dim])
      # Row k of the batch gets value values[k] in dimension idx.
      for kdx, z in enumerate(z_sample):
        z[idx] = values[kdx]
      if config.dataset == "mnist":
        y = np.random.choice(10, config.batch_size)
        y_one_hot = np.zeros((config.batch_size, 10))
        y_one_hot[np.arange(config.batch_size), y] = 1
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
      else:
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
      save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
  elif option == 2:
    values = np.arange(0, 1, 1./config.batch_size)
    for idx in [random.randint(0, 99) for _ in xrange(100)]:
      print(" [*] %d" % idx)
      z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
      z_sample = np.tile(z, (config.batch_size, 1))
      #z_sample = np.zeros([config.batch_size, dcgan.z_dim])
      for kdx, z in enumerate(z_sample):
        z[idx] = values[kdx]
      if config.dataset == "mnist":
        y = np.random.choice(10, config.batch_size)
        y_one_hot = np.zeros((config.batch_size, 10))
        y_one_hot[np.arange(config.batch_size), y] = 1
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
      else:
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
      try:
        make_gif(samples, './samples/test_gif_%s.gif' % (idx))
      except Exception:
        # GIF writing needs moviepy; fall back to a still PNG.
        save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
  elif option == 3:
    values = np.arange(0, 1, 1./config.batch_size)
    for idx in xrange(100):
      print(" [*] %d" % idx)
      z_sample = np.zeros([config.batch_size, dcgan.z_dim])
      for kdx, z in enumerate(z_sample):
        z[idx] = values[kdx]
      samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
      make_gif(samples, './samples/test_gif_%s.gif' % (idx))
  elif option == 4:
    image_set = []
    values = np.arange(0, 1, 1./config.batch_size)
    for idx in xrange(100):
      print(" [*] %d" % idx)
      z_sample = np.zeros([config.batch_size, dcgan.z_dim])
      for kdx, z in enumerate(z_sample): z[idx] = values[kdx]
      image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
      make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
    # Merge each sweep into a 10x10 grid and play forward then backward.
    new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \
        for idx in range(64) + range(63, -1, -1)]
    make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
|
yanheven/console | horizon/middleware.py | Python | apache-2.0 | 8,472 | 0.000118 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Middleware provided and used by Horizon.
"""
import json
import logging
import time
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME # noqa
from django.contrib.auth.views import redirect_to_login # noq | a
from django.contrib import messages as django_messages
from django import http
from django.http import HttpResponseRedirect # noqa
from django import shortcuts
from django.utils.encoding import iri_to_uri # noqa
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _ |
from horizon import exceptions
from horizon.utils import functions as utils
LOG = logging.getLogger(__name__)
class HorizonMiddleware(object):
"""The main Horizon middleware class. Required for use of Horizon."""
logout_reason = None
    def process_request(self, request):
        """Adds data necessary for Horizon to function to the request."""
        # Per-request scratch space consumed by Horizon views/templates.
        request.horizon = {'dashboard': None,
                           'panel': None,
                           'async_messages': []}
        if not hasattr(request, "user") or not request.user.is_authenticated():
            # proceed no further if the current request is already known
            # not to be authenticated
            # it is CRITICAL to perform this check as early as possible
            # to avoid creating too many sessions
            return None
        # Activate timezone handling
        tz = request.session.get('django_timezone')
        if tz:
            timezone.activate(tz)
        # Check for session timeout
        try:
            timeout = settings.SESSION_TIMEOUT
        except AttributeError:
            # Default to 30 minutes when SESSION_TIMEOUT is not configured.
            timeout = 1800
        last_activity = request.session.get('last_activity', None)
        timestamp = int(time.time())
        # If we use cookie-based sessions, check that the cookie size does not
        # reach the max size accepted by common web browsers.
        if (
            settings.SESSION_ENGINE ==
            'django.contrib.sessions.backends.signed_cookies'
        ):
            max_cookie_size = getattr(
                settings, 'SESSION_COOKIE_MAX_SIZE', None)
            session_cookie_name = getattr(
                settings, 'SESSION_COOKIE_NAME', None)
            session_key = request.COOKIES.get(session_cookie_name)
            if max_cookie_size is not None and session_key is not None:
                # Rough total size of all cookies the browser sent with
                # this request (names + values).
                cookie_size = sum((
                    len(key) + len(value)
                    for key, value in request.COOKIES.iteritems()
                ))
                if cookie_size >= max_cookie_size:
                    LOG.error(
                        'Total Cookie size for user_id: %(user_id)s is '
                        '%(cookie_size)sB >= %(max_cookie_size)sB. '
                        'You need to configure file-based or database-backed '
                        'sessions instead of cookie-based sessions: '
                        'http://docs.openstack.org/developer/horizon/topics/'
                        'deployment.html#session-storage'
                        % {
                            'user_id': request.session.get(
                                'user_id', 'Unknown'),
                            'cookie_size': cookie_size,
                            'max_cookie_size': max_cookie_size,
                        }
                    )
        if (isinstance(last_activity, int)
                and (timestamp - last_activity) > timeout):
            # Session expired: redirect to logout and tell the user why.
            request.session.pop('last_activity')
            response = HttpResponseRedirect(
                '%s?next=%s' % (settings.LOGOUT_URL, request.path))
            self.logout_reason = _("Session timed out.")
            utils.add_logout_reason(request, response, self.logout_reason)
            return response
        # Record activity for the next timeout check.
        request.session['last_activity'] = timestamp
def process_exception(self, request, exception):
"""Catches internal Horizon exception classes such as NotAuthorized,
NotFound and Http302 and handles them gracefully.
"""
if isinstance(exception, (exceptions.NotAuthorized,
exceptions.NotAuthenticated)):
auth_url = settings.LOGIN_URL
next_url = iri_to_uri(request.get_full_path())
if next_url != auth_url:
field_name = REDIRECT_FIELD_NAME
else:
field_name = None
login_url = request.build_absolute_uri(auth_url)
response = redirect_to_login(next_url, login_url=login_url,
redirect_field_name=field_name)
if request.is_ajax():
response_401 = http.HttpResponse(status=401)
response_401['X-Horizon-Location'] = response['location']
return response_401
return response
# If an internal "NotFound" error gets this far, return a real 404.
if isinstance(exception, exceptions.NotFound):
raise http.Http404(exception)
if isinstance(exception, exceptions.Http302):
# TODO(gabriel): Find a way to display an appropriate message to
# the user *on* the login form...
return shortcuts.redirect(exception.location)
def process_response(self, request, response):
"""Convert HttpResponseRedirect to HttpResponse if request is via ajax
to allow ajax request to redirect url
"""
if request.is_ajax() and hasattr(request, 'horizon'):
queued_msgs = request.horizon['async_messages']
if type(response) == http.HttpResponseRedirect:
# Drop our messages back into the session as per usual so they
# don't disappear during the redirect. Not that we explicitly
# use django's messages methods here.
for tag, message, extra_tags in queued_msgs:
getattr(django_messages, tag)(request, message, extra_tags)
if response['location'].startswith(settings.LOGOUT_URL):
redirect_response = http.HttpResponse(status=401)
# This header is used for handling the logout in JS
redirect_response['logout'] = True
if self.logout_reason is not None:
utils.add_logout_reason(
request, redirect_response, self.logout_reason)
else:
redirect_response = http.HttpResponse()
# Copy cookies from HttpResponseRedirect towards HttpResponse
for cookie_name, cookie in response.cookies.iteritems():
cookie_kwargs = dict((
(key, value) for key, value in cookie.iteritems()
if key in ('max_age', 'expires', 'path', 'domain',
'secure', 'httponly') and value
))
redirect_response.set_cookie(
cookie_name, cookie.value, **cookie_kwargs)
redirect_response['X-Horizon-Location'] = response['location']
return redirect_response
if queued_msgs:
# TODO(gabriel): When we have an async connection to the
# client (e.g. websockets) this should be pushed to the
|
GaelVaroquaux/scikits.image | scikits/image/morphology/__init__.py | Python | bsd-3-clause | 63 | 0 | f | rom grey import *
from selem import *
from ccomp import label
| |
maxdl/Synapse.py | synapse/main.py | Python | mit | 24,280 | 0.002059 | import itertools
import os.path
import sys
import time
from . import core
from . import file_io
from . import geometry
from . import stringconv
from . import version
#
# Functions
#
def save_output(profileli, opt):
""" Save a summary of results of evaluated profiles
"""
    def m(x, pixelwidth):
        """Convert a pixel distance to metric units."""
        return geometry.to_metric_units(x, pixelwidth)
    def m2(x, pixelwidth):
        """Convert a pixel area to metric units (pixel width squared)."""
        # For area units...
        return geometry.to_metric_units(x, pixelwidth**2)
    def na(x):
        """Return "N/A" for missing values (None or -1); otherwise x unchanged."""
        if x in (None, -1):
            return "N/A"
        else:
            return x
def write_session_summary():
with file_io.FileWriter("session.summary", opt) as f:
f.writerow(["%s version:" % version.title,
"%s (Last modified %s %s, %s)"
% ((version.version,) + version.date)])
f.writerow(["Number of evaluated profiles:", len(eval_proli)])
if err_fli:
f.writerow(["Number of non-evaluated profiles:", len(err_fli)] | )
f.writerow(["Metric unit:", eval_proli[0].metric_unit])
f.writerow(["Spatial resolution:", opt.spatial_resolution, eval_proli[0].metric_unit])
f.writerow(["Shell width:", opt.shell_width, eval_proli[0].metric_unit])
f.writerow(["Interpoint distances calculated:",
stringconv.yes_or_no(opt.determine_in | terpoint_dists)])
if opt.determine_interpoint_dists:
f.writerow(["Interpoint distance mode:", opt.interpoint_dist_mode])
f.writerow(["Shortest interpoint distances:",
stringconv.yes_or_no(opt.interpoint_shortest_dist)])
f.writerow(["Lateral interpoint distances:",
stringconv.yes_or_no(opt.interpoint_lateral_dist)])
f.writerow(["Monte Carlo simulations performed:",
stringconv.yes_or_no(opt.run_monte_carlo)])
if opt.run_monte_carlo:
f.writerow(["Number of Monte Carlo runs:", opt.monte_carlo_runs])
f.writerow(["Monte Carlo simulation window:", opt.monte_carlo_simulation_window])
f.writerow(["Strict localization in simulation window:",
stringconv.yes_or_no(opt.monte_carlo_strict_location)])
f.writerow(["Clusters determined:", stringconv.yes_or_no(opt.determine_clusters)])
if opt.determine_clusters:
f.writerow(["Within-cluster distance:",
opt.within_cluster_dist, eval_proli[0].metric_unit])
if clean_fli:
f.writerow(["Input files processed cleanly:"])
f.writerows([[fn] for fn in clean_fli])
if nop_fli:
f.writerow(["Input files processed but which generated no point distances:"])
f.writerows([[fn] for fn in nop_fli])
if warn_fli:
f.writerow(["Input files processed but which generated "
"warnings (see log for details):"])
f.writerows([[fn] for fn in warn_fli])
if err_fli:
f.writerow(["Input files not processed or not included in "
"summary (see log for details):"])
f.writerows([[fn] for fn in err_fli])
def write_profile_summary():
with file_io.FileWriter("profile.summary", opt) as f:
f.writerow(["Postsynaptic element length",
"Presynaptic element length",
"Number of PSDs:",
"Total postsynaptic membrane length incl perforations:",
"Total postsynaptic membrane length excl perforations:",
"Total PSD area:",
"Particles (total)",
"Particles in PSD",
"Particles within %s %s of PSD"
% (opt.spatial_resolution, eval_proli[0].metric_unit),
"Shell particles strictly synaptic and postsynaptic",
"Shell particles strictly synaptic and postsynaptic "
"or associated with postsynaptic membrane",
"Synaptic particles associated w/ postsynaptic "
"membrane",
"Synaptic particles associated w/ presynaptic membrane",
"Perisynaptic particles associated w/ postsynaptic "
"membrane",
"Perisynaptic particles associated w/ presynaptic "
"membrane",
"Within-perforation particles associated w/ "
"postsynaptic membrane",
"Within-perforation particles associated w/ "
"presynaptic membrane",
"Presynaptic profile",
"Postsynaptic profile",
"ID",
"Input file",
"Comment"])
f.writerows([[m(pro.posel.length(), pro.pixelwidth),
m(pro.prsel.length(), pro.pixelwidth),
len(pro.psdli),
m(pro.total_posm.length(), pro.pixelwidth),
sum([m(psd.posm.length(), pro.pixelwidth)
for psd in pro.psdli]),
sum([m2(psd.psdposm.area(), pro.pixelwidth)
for psd in pro.psdli]),
len(pro.pli),
len([p for p in pro.pli if p.is_within_psd]),
len([p for p in pro.pli if p.is_associated_with_psd]),
len([p for p in pro.pli
if p.strict_lateral_location == "synaptic" and
p.axodendritic_location == "postsynaptic" and
p.is_within_postsynaptic_membrane_shell]),
len([p for p in pro.pli
if p.strict_lateral_location == "synaptic" and
(p.axodendritic_location == "postsynaptic" and
p.is_within_postsynaptic_membrane_shell) or
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "synaptic" and
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "synaptic" and
p.is_presynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "perisynaptic" and
p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "perisynaptic" and
p.is_presynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "within perforation"
and p.is_postsynaptic_membrane_associated]),
len([p for p in pro.pli
if p.lateral_location == "within perforation"
and p.is_presynaptic_membrane_associated]),
pro.presyn_profile,
pro.postsyn_profile,
pro.id,
pro.comment,
os.path.basename(pro.inputfn)] for pro in eval_proli])
def write_point_summary(ptype):
if ptype == "particle":
pli = "pli"
pstr = "particle"
elif ptype == "random":
if not opt.use_random:
return
else:
pli = "randomli"
pstr = "point"
el |
Inboxen/Inboxen | inboxen/views/manifest.py | Python | agpl-3.0 | 1,459 | 0 | ##
# Copyright (C) 2018 Jessica Tallon & Matthew Molyneaux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from django.conf import settings
from django.http import JsonResponse
from django.templatetags.static import static
from django.urls import reverse
from django.views.decorators.http import require_GET
@require_GET
def manifest(request):
    """Serve the web-app manifest describing this Inboxen instance."""
    icon = {
        "src": static("imgs/megmelon-icon-white.png"),
        "sizes": "128x128",
        "type": "image/png",
    }
    payload = {
        "name": settings.SITE_NAME,
        "short_name": settings.SITE_NAME,
        "icons": [icon],
        "theme_color": "#ffffff",
        "background_color": "#ffffff",
        "display": "browser",
        "start_url": reverse("user-home"),
    }
    return JsonResponse(payload)
|
NaPs/Docbucket | setup.py | Python | mit | 796 | 0 | from setuptools import setup, find_packages
import os
# Distribution version of docbucket.
version = '1.0~dev'

# Long description is assembled from the README and changelog shipped next
# to this setup script.  (Files are now read via `with` so the handles are
# closed; two lines garbled by dataset separators -- "desc | ription" and
# "| url=" -- are restored.)
base = os.path.dirname(__file__)
with open(os.path.join(base, 'README.rst')) as f:
    readme = f.read()
with open(os.path.join(base, 'CHANGELOG.rst')) as f:
    changelog = f.read()

setup(name='docbucket',
      version=version,
      description='',
      long_description=readme + '\n' + changelog,
      classifiers=[],
      keywords='dms django',
      author='Antoine Millet',
      author_email='antoine@inaps.org',
      url='https://github.com/NaPs/Docbucket',
      license='MIT',
      data_files=(
          ('/etc/', ('etc/docbucket.conf',)),
      ),
      scripts=['docbucketadm'],
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=True,
      install_requires=[])
|
vaidap/zulip | zerver/tests/test_urls.py | Python | apache-2.0 | 6,615 | 0.001209 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import importlib
import os
import six
import ujson
import django.core.urlresolvers
from django.test import TestCase
from typing import List, Optional
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Stream
from zproject import urls
class PublicURLTest(ZulipTestCase):
    """
    Account creation URLs are accessible even when not logged in. Authenticated
    URLs redirect to a page.
    """
    def fetch(self, method, urls, expected_status):
        # type: (str, List[str], int) -> None
        """Request every URL via the named client method and check status."""
        for url in urls:
            # e.g. self.client_post(url) if method is "post"
            response = getattr(self, method)(url)
            self.assertEqual(response.status_code, expected_status,
                             msg="Expected %d, received %d for %s to %s" % (
                                 expected_status, response.status_code, method, url))

    def test_public_urls(self):
        # type: () -> None
        """
        Test which views are accessible when not logged in.
        """
        # FIXME: We should also test the Tornado URLs -- this codepath
        # can't do so because this Django test mechanism doesn't go
        # through Tornado.
        denmark_stream_id = Stream.objects.get(name='Denmark').id
        # BUG FIX: a missing comma after "/accounts/login/" concatenated it
        # with "/en/accounts/home/", so neither URL was actually exercised.
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
                          "/en/accounts/home/", "/ru/accounts/home/",
                          "/en/accounts/login/", "/ru/accounts/login/",
                          "/help/"],
                    302: ["/", "/en/", "/ru/"],
                    401: ["/json/streams/%d/members" % (denmark_stream_id,),
                          "/api/v1/users/me/subscriptions",
                          "/api/v1/messages",
                          "/json/messages",
                          "/api/v1/streams",
                          ],
                    404: ["/help/nonexistent"],
                    }
        # Add all files in 'templates/zerver/help' directory (except for 'main.html' and
        # 'index.md') to `get_urls['200']` list.
        for doc in os.listdir('./templates/zerver/help'):
            if doc.startswith(".") or '~' in doc or '#' in doc:
                continue  # nocoverage -- just here for convenience
            if doc not in {'main.html', 'index.md', 'include'}:
                get_urls[200].append('/help/' + os.path.splitext(doc)[0])  # Strip the extension.
        post_urls = {200: ["/accounts/login/"],
                     302: ["/accounts/logout/"],
                     401: ["/json/messages",
                           "/json/invites",
                           "/json/subscriptions/exists",
                           "/api/v1/users/me/subscriptions/properties",
                           "/json/fetch_api_key",
                           "/json/users/me/pointer",
                           "/json/users/me/subscriptions",
                           "/api/v1/users/me/subscriptions",
                           ],
                     400: ["/api/v1/external/github",
                           "/api/v1/fetch_api_key",
                           ],
                     }
        patch_urls = {
            401: ["/json/settings"],
        }
        put_urls = {401: ["/json/users/me/pointer"],
                    }
        for status_code, url_set in six.iteritems(get_urls):
            self.fetch("client_get", url_set, status_code)
        for status_code, url_set in six.iteritems(post_urls):
            self.fetch("client_post", url_set, status_code)
        for status_code, url_set in six.iteritems(patch_urls):
            self.fetch("client_patch", url_set, status_code)
        for status_code, url_set in six.iteritems(put_urls):
            self.fetch("client_put", url_set, status_code)

    def test_get_gcid_when_not_configured(self):
        # type: () -> None
        """Without GOOGLE_CLIENT_ID configured the endpoint must return 400."""
        with self.settings(GOOGLE_CLIENT_ID=None):
            resp = self.client_get("/api/v1/fetch_google_client_id")
            self.assertEqual(400, resp.status_code,
                             msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
                                 resp.status_code,))
            self.assertEqual('error', resp.json()['result'])

    def test_get_gcid_when_configured(self):
        # type: () -> None
        """With GOOGLE_CLIENT_ID configured the endpoint returns the value."""
        with self.settings(GOOGLE_CLIENT_ID="ABCD"):
            resp = self.client_get("/api/v1/fetch_google_client_id")
            self.assertEqual(200, resp.status_code,
                             msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
                                 resp.status_code,))
            data = ujson.loads(resp.content)
            self.assertEqual('success', data['result'])
            self.assertEqual('ABCD', data['google_client_id'])
class URLResolutionTest(TestCase):
    """Verify that every view referenced from the URL configuration resolves
    to a function that actually exists."""

    def get_callback_string(self, pattern):
        # type: (django.core.urlresolvers.RegexURLPattern) -> Optional[str]
        # Newer Django exposes 'lookup_str'; older versions '_callback_str'.
        attr_name = 'lookup_str' if hasattr(pattern, 'lookup_str') else '_callback_str'
        return getattr(pattern, attr_name, None)

    def check_function_exists(self, module_name, view):
        # type: (str, str) -> None
        module = importlib.import_module(module_name)
        self.assertTrue(hasattr(module, view),
                        "View %s.%s does not exist" % (module_name, view))

    # Tests that all views in urls.v1_api_and_json_patterns exist
    def test_rest_api_url_resolution(self):
        # type: () -> None
        for pattern in urls.v1_api_and_json_patterns:
            if not self.get_callback_string(pattern):
                continue
            if not hasattr(pattern, "default_args"):
                continue
            for func_string in pattern.default_args.values():
                if isinstance(func_string, tuple):
                    func_string = func_string[0]
                module_name, view = func_string.rsplit('.', 1)
                self.check_function_exists(module_name, view)

    # Tests function-based views declared in urls.urlpatterns for
    # whether the function exists. We at present do not test the
    # class-based views.
    def test_non_api_url_resolution(self):
        # type: () -> None
        for pattern in urls.urlpatterns:
            callback = self.get_callback_string(pattern)
            if not callback:
                continue
            module_name, base_view = callback.rsplit(".", 1)
            self.check_function_exists(module_name, base_view)
|
umitproject/openmonitor-desktop-agent | umit/icm/agent/gui/dashboard/timeline/TimeLineConnector.py | Python | gpl-2.0 | 3,540 | 0.013842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
#
# Authors: Guilherme Polo <ggpolo@gmail.com>
# Tianwei Liu <liutainweidlut@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
Timeline Graph Connector
"""
import gobject
class Connector(gobject.GObject):
    """Central signal hub for the Timeline dashboard widgets.

    Widgets (graph, toolbar, filters, changes tree) communicate through
    these signals instead of holding direct references to each other.
    Two lines garbled by dataset separators ("| 'graph_show'" and
    "#Lin | e") are restored here.
    """
    __gsignals__ = {
        # graph-show is used at Timeline to hide/show graph and filter.
        'graph_show': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (bool, )),
        # Line filter updates. Timeline handles this signal and updates the
        # Timeline graph accordingly.
        'filter_update': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (object, )),
        # Timeline graph emits this signal when a new selection is done;
        # TimelineBase grabs it, handles it and stores the selected timerange.
        'selection_changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (object, )),
        # After TimeLineBase defines the timerange for selection, it emits a
        # selection-update, meaning that TimeLineBarDisplay now may use this
        # range to grab data for display statistics. It is also used at
        # TimeLineChangesTree to update the changes listing.
        'selection_update': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (object, object)),
        # data-changed is a way to tell that the graph needs a redraw; it is
        # used at TimeLineGraphToolbar to update graph mode. TimeLineBase
        # handles this signal and updates everything needed.
        'data_changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (str, str)),
        # tab-changed is a way to tell that the graph needs a redraw; it is
        # emitted when the Dashboard's left treeview changes the current tab.
        # TimeLineBase handles this signal and updates everything needed.
        'tab_changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (str,)),
        # After handling data-changed, TLBase emits data-update with the new
        # data. Timeline catches this signal and requests a graph update.
        'data_update': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
                        (object, object, object, object, object, object, object)),
        # date-changed is used at TLToolBar to update ranges and labels.
        # TLBase emits this after emitting data-update.
        'date_changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()),
        # date-update is emitted from TLToolBar when a date update is
        # requested; Timeline grabs it, updates the date based on the current
        # mode and then emits a date-update.
        'date_update': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (int, ))
    }
    def __init__(self):
        self.__gobject_init__()
gobject.type_register(Connector)
|
optiv-labs/talus_client | talus_client/cmds/images.py | Python | mit | 11,313 | 0.034827 | #!/usr/bin/env python
# encoding: utf-8
import argparse
import cmd
import os
import shlex
import sys
from tabulate import tabulate
import time
from talus_client.cmds import TalusCmdBase
import talus_client.api
import talus_client.errors as errors
from talus_client.models import Image,Field
class ImageCmd(TalusCmdBase):
"""The Talus images command processor
"""
command_name = "image"
def do_list(self, args):
"""List existing images in Talus
image list
Examples:
List all images in Talus:
image list
"""
parts = shlex.split(args)
search = self._search_terms(parts, user_default_filter=False)
if "sort" not in search:
search["sort"] = "timestamps.created"
if "--all" not in parts and "num" not in search:
search["num"] = 20
self.out("showing first 20 results, use --all to see everything")
fields = []
for image in self._talus_client.image_iter(**search):
fields.append([
image.id,
image.name,
image.status["name"],
image.tags,
self._nice_name(image, "base_image") if image.base_image is not None else None,
self._nice_name(image, "os"),
image.md5,
])
print(tabulate(fields, headers=Image.headers()))
    def do_info(self, args):
        """List detailed information about an image

        info ID_OR_NAME

        Examples:
            List info about the image named "Win 7 Pro"
                image info "Win 7 Pro"
        """
        # NOTE(review): not implemented -- the help text above is shown to
        # users (cmd.Cmd uses docstrings for help), but the command itself
        # silently does nothing. TODO: implement or remove.
        pass
def do_import(self, args):
"""Import an image into Talus
import FILE -n NAME -o OSID [-d DESC] [-t TAG1,TAG2,..] [-u USER] [-p PASS] [-i]
FILE The file to i | mport
-o,--os ID or name of the operating system model
| -n,--name The name of the resulting image (default: basename(FILE))
-d,--desc A description of the image (default: "")
-t,--tags Tags associated with the image (default: [])
-f,--file-id The id of an already-uploaded file (NOT A NORMAL USE CASE)
-u,--username The username to be used in the image (default: user)
-p,--password The password to be used in the image (default: password)
-i,--interactive To interact with the imported image for setup (default: False)
Examples:
To import an image from VMWare at ``~/images/win7pro.vmdk`` named "win 7 pro test"
and to be given a chance to perform some manual setup/checks:
image import ~/images/win7pro.vmdk -n "win 7 pro test" -i -o "win7pro" -t windows7,x64,IE8
"""
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str)
parser.add_argument("--os", "-o")
parser.add_argument("--name", "-n")
parser.add_argument("--desc", "-d", default="desc")
parser.add_argument("--file-id", "-f", default=None)
parser.add_argument("--tags", "-t", default="")
parser.add_argument("--username", "-u", default="user")
parser.add_argument("--password", "-p", default="password")
parser.add_argument("--interactive", "-i", action="store_true", default=False)
args = parser.parse_args(shlex.split(args))
args.tags = args.tags.split(",")
if args.name is None:
args.name = os.path.basename(args.file)
image = self._talus_client.image_import(
image_path = args.file,
image_name = args.name,
os_id = args.os,
desc = args.desc,
tags = args.tags,
file_id = args.file_id,
username = args.username,
password = args.password
)
self._wait_for_image(image, args.interactive)
    def do_edit(self, args):
        """Edit an existing image. Interactive mode only
        """
        if args.strip() == "":
            raise errors.TalusApiError("you must provide a name/id of an image to edit it")
        parts = shlex.split(args)
        leftover = []
        image_id_or_name = None
        # _search_terms consumes key:value filters and leaves positional
        # arguments (the image name or id) in `leftover`.
        search = self._search_terms(parts, out_leftover=leftover)
        if len(leftover) > 0:
            image_id_or_name = leftover[0]
        image = self._resolve_one_model(image_id_or_name, Image, search)
        if image is None:
            raise errors.TalusApiError("could not find talus image with id {!r}".format(image_id_or_name))
        while True:
            # Drop into the interactive field editor; loop so that validation
            # failures send the user back into the editor with edits intact.
            model_cmd = self._make_model_cmd(image)
            cancelled = model_cmd.cmdloop()
            if cancelled:
                break
            # Validate the required fields before saving.
            error = False
            if image.os is None:
                self.err("You must specify the os")
                error = True
            if image.name is None or image.name == "":
                self.err("You must specify a name for the image")
                error = True
            if image.base_image is None:
                self.err("You must specify the base_image for your new image")
                error = True
            if error:
                continue
            try:
                image.timestamps = {"modified": time.time()}
                image.save()
                self.ok("edited image {}".format(image.id))
                self.ok("note that this DOES NOT start the image for configuring!")
            except errors.TalusApiError as e:
                self.err(e.message)
            return
def do_create(self, args):
"""Create a new image in talus using an existing base image. Anything not explicitly
specified will be inherited from the base image, except for the name, which is required.
create -n NAME -b BASEID_NAME [-d DESC] [-t TAG1,TAG2,..] [-u USER] [-p PASS] [-o OSID] [-i]
-o,--os ID or name of the operating system model
-b,--base ID or name of the base image
-n,--name The name of the resulting image (default: basename(FILE))
-d,--desc A description of the image (default: "")
-t,--tags Tags associated with the image (default: [])
--shell Forcefully drop into an interactive shell
-v,--vagrantfile A vagrant file that will be used to congfigure the image
-i,--interactive To interact with the imported image for setup (default: False)
Examples:
To create a new image based on the image with id 222222222222222222222222 and adding
a new description and allowing for manual user setup:
image create -b 222222222222222222222222 -d "some new description" -i
"""
args = shlex.split(args)
if self._go_interactive(args):
image = Image()
self._prep_model(image)
image.username = "user"
image.password = "password"
image.md5 = " "
image.desc = "some description"
image.status = {
"name": "create",
"vagrantfile": None,
"user_interaction": True
}
while True:
model_cmd = self._make_model_cmd(image)
model_cmd.add_field(
"interactive",
Field(True),
lambda x,v: x.status.update({"user_interaction": v}),
lambda x: x.status["user_interaction"],
desc="If the image requires user interaction for configuration",
)
model_cmd.add_field(
"vagrantfile",
Field(str),
lambda x,v: x.status.update({"vagrantfile": open(v).read()}),
lambda x: x.status["vagrantfile"],
desc="The path to the vagrantfile that will configure the image"
)
cancelled = model_cmd.cmdloop()
if cancelled:
break
error = False
if image.os is None:
self.err("You must specify the os")
error = True
if image.name is None or image.name == "":
self.err("You must specify a name for the image")
error = True
if image.base_image is None:
self.err("You must specify the base_image for your new image")
error = True
if error:
continue
try:
image.timestamps = {"created": time.time()}
image.save()
self.ok("created new image {}".format(image.id))
except errors.TalusApiError as e:
self.err(e.message)
else:
self._wait_for_image(image, image.status["user_interaction"])
return
parser = self._argparser()
parser.add_argument("--os", "-o", default=None)
parser.add_argument("--base", "-b", default=None)
parser.add_argument("--name", "-n", default=None)
parser.add_argument("--desc", "-d", default="")
parser.add_argument("--tags", "-t", default="")
parser.add_argument("--vagrantfile", "-v", default=None, type=argparse.FileType("rb"))
parser.add_argument("--interactive", "-i", action="store_true", default=False)
args = parser.parse_args(args)
if args.name is None:
raise errors.TalusApiError("You must specify an image name")
vagrantfile_contents = None
if args.vagrantfile is not None:
vagrantfile_contents = args.vagrantfile.read()
if args.tags is not None:
args.tags = args.tags.split(",")
error = False
validation = {
"os" : "You m |
buzz/flicks | flicksapp/management/commands/scan_files.py | Python | gpl-2.0 | 7,514 | 0.00173 | from django.core.management.base import BaseCommand
import os
import re
import subprocess
from pymediainfo import MediaInfo
from flicksapp.models import Movie, File, Track
from flicksapp.choices import FILE_TYPES, PICTURE_TYPE, VIDEO_TYPE, NFO_TYPE, \
SUBTITLES_TYPE, OTHER_TYPE, AUDIO_TYPE
# Filename patterns (case-insensitive regexes) skipped during the scan,
# e.g. checksum files and raw DVD structure files.
IGNORES = (
'^\..+\.sha1$',
'\.ifo$',
'\.bup$',
'\.vob$',
)
# pymediainfo attribute name -> File model field, for the container-level
# ('General') track of a media file.
MEDIAINFO_MAPPING_GENERAL = (
('codec', 'container_format'),
('file_size', 'file_size'),
('duration', 'duration'),
('overall_bit_rate', 'overall_bit_rate'),
('writing_application', 'writing_application'),
('writing_library', 'writing_library'),
)
# Fields shared by every per-stream track type (video/audio/subtitles).
MEDIAINFO_MAPPING_TRACK_BASE = (
('format', 'format'),
('codec_info', 'codec'),
('bit_rate', 'bit_rate'),
('stream_size', 'stream_size'),
('writing_library', 'writing_library'),
('language', 'language'),
)
# Video tracks: base fields plus picture geometry and frame data.
MEDIAINFO_MAPPING_VIDEO = MEDIAINFO_MAPPING_TRACK_BASE + (
('width', 'video_width'),
('height', 'video_height'),
('display_aspect_ratio', 'video_aspect_ratio'),
('frame_rate', 'video_frame_rate'),
('bits__pixel_frame', 'video_bpp'),
)
# Audio tracks: base fields plus sampling/channel data.
MEDIAINFO_MAPPING_AUDIO = MEDIAINFO_MAPPING_TRACK_BASE + (
('bit_rate_mode', 'audio_bit_rate_mode'),
('sampling_rate', 'audio_sampling_rate'),
('channel_s', 'audio_channels'),
)
# Subtitle tracks carry no extra fields beyond the base set.
MEDIAINFO_MAPPING_SUBTITLES = MEDIAINFO_MAPPING_TRACK_BASE
class Command(BaseCommand):
    """Walk every movie's media directory and store per-file and per-track
    technical metadata in the database.

    Uses the ``file`` command for a short description of each file and
    ``mediainfo`` (via pymediainfo) for container/stream details of video
    files.  Existing File/Track rows are updated in place; missing media
    directories and unexpected sub-directories are reported at the end.
    """

    help = 'Scans media files using mediainfo'

    def handle(self, *args, **options):
        i = 0                       # number of movies processed
        nodir = []                  # movies whose media directory is missing
        file_count_new = 0
        file_count_updated = 0
        track_count_new = 0
        track_count_updated = 0
        subdirs = []                # unexpected sub-directories found while walking
        for m in Movie.objects.all():
            i += 1
            mdir = m.media_directory
            if not os.path.isdir(mdir):
                nodir.append([mdir, m.id, m.title])
                continue
            # NOTE: single-argument print() works identically on Py2 and Py3.
            print('*' * 80)
            print(' %25s %90s' % (mdir, m.title.encode('ascii', 'replace')))
            for root, dirs, files in os.walk(mdir):
                # Record sub-directories so they can be warned about later.
                for d in dirs:
                    subdir = '%s%s' % (root, d)
                    subdirs.append([subdir, m.id, m.title])
                # Check media files.
                for f in files:
                    # Skip files matching any ignore pattern.
                    ignore = False
                    for ignore_pattern in IGNORES:
                        if re.search(ignore_pattern, f, flags=re.IGNORECASE):
                            ignore = True
                            break
                    if ignore:
                        continue
                    filename = '%s/%s' % (root, f)
                    print(f)
                    # Create/update the File object for this path.
                    qs = File.objects.filter(filename=f, movie=m)
                    if qs.exists():  # cheaper than count() > 0
                        file_obj = qs.first()
                        file_count_updated += 1
                    else:
                        file_obj = File.objects.create(filename=f, movie=m)
                        file_obj.save()
                        file_count_new += 1
                    # Determine the file type from the extension; a None
                    # pattern list acts as the catch-all fallback.
                    for file_type, n, patterns in FILE_TYPES:
                        found = False
                        if patterns is None:
                            file_obj.file_type = file_type
                            found = True
                        else:
                            for ext in patterns:
                                pattern = r'.+\.%s$' % ext
                                if re.search(pattern, f, flags=re.IGNORECASE):
                                    file_obj.file_type = file_type
                                    found = True
                                    break
                        if found:
                            break
                    # Store the `file` command's one-line description verbatim.
                    file_output = subprocess.check_output(
                        ['file', '--brief', filename])
                    file_obj.file_output = file_output
                    # Only video files get track-level mediainfo data.
                    if file_obj.file_type != VIDEO_TYPE:
                        file_obj.save()
                        continue
                    media_info = MediaInfo.parse(filename)
                    for track in media_info.tracks:
                        # Container-level data lives on the File itself.
                        if track.track_type == 'General':
                            for key, model_attr in MEDIAINFO_MAPPING_GENERAL:
                                value = getattr(track, key)
                                setattr(file_obj, model_attr, value)
                            continue
                        elif track.track_type == 'Video':
                            track_type = VIDEO_TYPE
                            mapping = MEDIAINFO_MAPPING_VIDEO
                        elif track.track_type == 'Audio':
                            track_type = AUDIO_TYPE
                            mapping = MEDIAINFO_MAPPING_AUDIO
                        elif track.track_type == 'Text':
                            track_type = SUBTITLES_TYPE
                            mapping = MEDIAINFO_MAPPING_SUBTITLES
                        elif track.track_type == 'None' or \
                                track.track_type == 'Menu':
                            continue
                        else:
                            print('Warning: Unknown track type: %s' %
                                  track.track_type)
                            continue
                        # Create/update the Track row for this stream.
                        qs = Track.objects.filter(
                            file=file_obj, track_type=track_type,
                            track_id=track.track_id)
                        if qs.exists():
                            track_obj = qs.first()
                            track_count_updated += 1
                        else:
                            track_obj = Track.objects.create(
                                file=file_obj, track_type=track_type,
                                track_id=track.track_id)
                            track_obj.save()
                            track_count_new += 1
                        for key, model_attr in mapping:
                            value = getattr(track, key)
                            setattr(track_obj, model_attr, value)
                        track_obj.save()
                    file_obj.save()
        # Final summary.
        track_count = track_count_new + track_count_updated
        file_count = file_count_new + file_count_updated
        print('Found %d tracks (%d new, %d updated)' %
              (track_count, track_count_new, track_count_updated))
        print(' in %d files (%d new, %d updated)' %
              (file_count, file_count_new, file_count_updated))
        print('%d movies processed, of which %d have no dir:' %
              (i, len(nodir)))
        print(nodir)
        if nodir:
            for dirname, id, title in nodir:
                print(' %s: %s (%i)' %
                      (dirname, title.encode('ascii', 'replace'), id))
        print('%d subdirs in folders found' % len(subdirs))
        if subdirs:
            for dirname, id, title in subdirs:
                print(' %s: %s (%i)' %
                      (dirname, title.encode('ascii', 'replace'), id))
|
justephens/ArduMusic | ArduMusic.py | Python | mit | 3,240 | 0.01142 | import MuseScoreHelper as MuseScore
import SerialHelper
# Interactive menu loop: connect to an Arduino over serial and upload
# two buzzer tracks of (pitch, duration) note data parsed from MuseScore.
while True:
    # Print a menu of available operations.
    print("AVAILABLE OPERATIONS:")
    print(" 1. Connect Device\n")
    print(" 2. Load Music File\n")
    print(" 3. Exit Program\n")

    # Loop until valid input is provided.
    while True:
        try:
            operation_code = int(input("SELECT OPERATION: "))
            if 1 <= operation_code <= 3:
                break
        except ValueError:
            # Non-numeric input: prompt again.  (The original used a bare
            # except, which also swallowed KeyboardInterrupt.)
            pass

    # Print spacer.
    print("\n...\n")

    # CONNECT DEVICE
    if operation_code == 1:
        # Let the user select from a list of devices, then form a
        # connection with the chosen device.
        device = SerialHelper.select_device()
        # BUG FIX: the original tested `type(device) != None`, which is
        # always true; test the device itself.
        if device is not None:
            Arduino = SerialHelper.device_begin(device)
        else:
            print("No device found")
        print("\n...\n")

    # LOAD MUSIC FILE
    elif operation_code == 2:
        # Ask the user for a file to load music from and parse it into a
        # list of tracks, each a list of (pitch, duration) tuples.
        Music_Raw = MuseScore.read_file()

        # Show the tracks that were returned (one track per buzzer).
        for track_index in range(len(Music_Raw)):
            print("BUZZER " + str(track_index) + ":")
            for note in Music_Raw[track_index]:
                print(" " + str(note[0]) + " " + str(note[1]))

        print("\n...\n")

        # Build the byte payload for each buzzer.
        # NOTE(review): element 0 of each track is skipped here
        # (presumably a header entry) -- confirm against MuseScoreHelper.
        output_b1 = bytes(Music_Raw[0][1])
        for note in Music_Raw[0][2:]:
            output_b1 += bytes(note)
        output_b2 = bytes(Music_Raw[1][1])
        for note in Music_Raw[1][2:]:
            output_b2 += bytes(note)
        print(output_b1)
        print(output_b2)

        # Write 255 to the Arduino to begin a transmission.
        Arduino.write(bytes([255]))

        # The Arduino replies with the maximum notes it can handle per
        # buzzer and its buzzer count; clamp what we actually send.
        Max_notes = int(SerialHelper.device_readline(Arduino))
        Num_buzzers = int(SerialHelper.device_readline(Arduino))
        # Each note is 2 bytes, hence len(...) / 2 notes in the payload.
        Num_notes = Max_notes if int(len(output_b1) / 2) > Max_notes \
            else int(len(output_b1) / 2)

        # Buzzer one: note count, then the note bytes.
        Arduino.write(SerialHelper.pack_short(Num_notes))
        Arduino.write(output_b1[0:Num_notes * 2])
        # Buzzer two: same count + data handshake.
        Arduino.write(SerialHelper.pack_short(Num_notes))
        Arduino.write(output_b2[0:Num_notes * 2])

        # Print a nice overview at the end.
        print("Music Transfer Complete:")
        print(" Arduino with " + str(Num_buzzers) + " buzzers")
        print(" " + str(Num_notes) + " notes transferred")

    # EXIT PROGRAM
    elif operation_code == 3:
        print("Exiting Program")
        break
|
kevinlee12/oppia | core/controllers/android_e2e_config.py | Python | apache-2.0 | 13,766 | 0.000218 | # Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controller for initializing android specific structures."""
from __future__ import annotations
import os
from core import feconf
from core import python_utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import e | xp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import fs_services
from | core.domain import opportunity_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
class InitializeAndroidTestDataHandler(base.BaseHandler):
"""Handler to initialize android specific structures."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {'POST': {}}
@acl_decorators.open_access
def post(self):
"""Generates structures for Android end-to-end tests.
This handler generates structures for Android end-to-end tests in
order to evaluate the integration of network requests from the
Android client to the backend. This handler should only be called
once (or otherwise raises an exception), and can only be used in
development mode (this handler is unavailable in production).
Note that the handler outputs an empty JSON dict when the request is
successful.
The specific structures that are generated:
Topic: A topic with both a test story and a subtopic.
Story: A story with 'android_interactions' as a exploration
node.
Exploration: 'android_interactions' from the local assets.
Subtopic: A dummy subtopic to validate the topic.
Skill: A dummy skill to validate the subtopic.
Raises:
Exception. When used in production mode.
InvalidInputException. The topic is already
created but not published.
InvalidInputException. The topic is already published.
"""
if not constants.DEV_MODE:
raise Exception('Cannot load new structures data in production.')
if topic_services.does_topic_with_name_exist(
'Android test'):
topic = topic_fetchers.get_topic_by_name('Android test')
topic_rights = topic_fetchers.get_topic_rights(
topic.id, strict=False)
if topic_rights.topic_is_published:
raise self.InvalidInputException(
'The topic is already published.')
else:
raise self.InvalidInputException(
'The topic exists but is not published.')
exp_id = '26'
user_id = feconf.SYSTEM_COMMITTER_ID
# Generate new Structure id for topic, story, skill and question.
topic_id = topic_fetchers.get_new_topic_id()
story_id = story_services.get_new_story_id()
skill_id = skill_services.get_new_skill_id()
question_id = question_services.get_new_question_id()
# Create dummy skill and question.
skill = self._create_dummy_skill(
skill_id, 'Dummy Skill for Android', '<p>Dummy Explanation 1</p>')
question = self._create_dummy_question(
question_id, 'Question 1', [skill_id])
question_services.add_question(user_id, question)
question_services.create_new_question_skill_link(
user_id, question_id, skill_id, 0.3)
# Create and update topic to validate before publishing.
topic = topic_domain.Topic.create_default_topic(
topic_id, 'Android test', 'test-topic-one', 'description')
topic.update_url_fragment('test-topic')
topic.update_meta_tag_content('tag')
topic.update_page_title_fragment_for_web('page title for topic')
# Save the dummy image to the filesystem to be used as thumbnail.
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(
feconf.ENTITY_TYPE_TOPIC, topic_id))
fs.commit(
'%s/test_svg.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image,
mimetype='image/svg+xml')
# Update thumbnail properties.
topic.update_thumbnail_filename('test_svg.svg')
topic.update_thumbnail_bg_color('#C6DCDA')
# Add other structures to the topic.
topic.add_canonical_story(story_id)
topic.add_uncategorized_skill_id(skill_id)
topic.add_subtopic(1, 'Test Subtopic Title')
# Update and validate subtopic.
topic.update_subtopic_thumbnail_filename(1, 'test_svg.svg')
topic.update_subtopic_thumbnail_bg_color(1, '#FFFFFF')
topic.update_subtopic_url_fragment(1, 'suburl')
topic.move_skill_id_to_subtopic(None, 1, skill_id)
subtopic_page = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
1, topic_id))
# Upload local exploration to the datastore and enable feedback.
exp_services.load_demo(exp_id)
rights_manager.release_ownership_of_exploration(
user_services.get_system_user(), exp_id)
exp_services.update_exploration(
user_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'correctness_feedback_enabled',
'new_value': True
})], 'Changed correctness_feedback_enabled.')
# Add and update the exploration/node to the story.
story = story_domain.Story.create_default_story(
story_id, 'Android End to End testing', 'Description',
topic_id, 'android-end-to-end-testing')
story.add_node(
'%s%d' % (story_domain.NODE_ID_PREFIX, 1),
'Testing with UI Automator'
)
story.update_node_description(
'%s%d' % (story_domain.NODE_ID_PREFIX, 1),
'To test all Android interactions'
)
story.update_node_exploration_id(
'%s%d' % (story_domain.NODE_ID_PREFIX, 1),
exp_id
)
# Save the dummy image to the filesystem to be used as thumbnail.
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(
feconf.ENTITY_TYPE_STORY, story_id))
fs.commit(
'%s/test_svg.svg' % (constants.ASSET_TYPE_THUMBNAIL), raw_image,
mimetype='image/svg+xml')
story.update_node_thumbnail_filename(
'%s%d' % (story_domain.NODE_ID_PREFIX, 1),
'test_svg.svg')
story.update_node_thumbnail_bg_color(
'%s%d' % (story_domain.NODE_ID_PREFIX, 1), '#F8BF74')
# Update and validate the story.
story.update_meta_tag_content('tag')
story.up |
MicroPyramid/Django-CRM | leads/views.py | Python | mit | 30,110 | 0.000731 | from django.db.models import Q
from django.shortcuts import get_object_or_404
from accounts.models import Account, Tags
from contacts.models import Contact
from leads import swagger_params
from common.models import Attachments, Comment, APISettings, Profile
from common.utils import COUNTRIES, LEAD_SOURCE, LEAD_STATUS, INDCHOICES
from common.custom_auth import JSONWebTokenAuthentication
from common.serializer import (
ProfileSerializer,
CommentSerializer,
AttachmentsSerializer,
LeadCommentSerializer,
)
from leads.models import Lead, Company
from leads.forms import LeadListForm
from leads.serializer import LeadSerializer, LeadCreateSerializer, CompanySerializer
from leads.tasks import (
create_lead_from_file,
send_email_to_assigned_user,
send_lead_assigned_emails,
)
from teams.serializer import TeamsSerializer
from teams.models import Teams
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from drf_yasg.utils import swagger_auto_schema
import json
class LeadListView(APIView, LimitOffsetPagination):
model = Lead
authentication_classes = (JSONWebTokenAuthenti | cation,)
permission_classes = (IsAuthenticated,)
def get_context_data(self, **kwargs):
params | = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
queryset = (
self.model.objects.filter(org=self.request.org)
.exclude(status="converted")
.select_related("created_by")
.prefetch_related(
"tags",
"assigned_to",
)
).order_by('-id')
if self.request.profile.role != "ADMIN" and not self.request.user.is_superuser:
queryset = queryset.filter(
Q(assigned_to__in=[self.request.profile]) | Q(
created_by=self.request.profile)
)
if params:
if params.get("name"):
queryset = queryset.filter(
Q(first_name__icontains=params.get("name"))
& Q(last_name__icontains=params.get("name"))
)
if params.get("title"):
queryset = queryset.filter(
title__icontains=params.get("title"))
if params.get("source"):
queryset = queryset.filter(source=params.get("source"))
if params.getlist("assigned_to"):
queryset = queryset.filter(
assigned_to__id__in=json.loads(
params.get("assigned_to"))
)
if params.get("status"):
queryset = queryset.filter(status=params.get("status"))
if params.get("tags"):
queryset = queryset.filter(
tags__in=json.loads(params.get("tags"))
)
if params.get("city"):
queryset = queryset.filter(
city__icontains=params.get("city"))
if params.get("email"):
queryset = queryset.filter(
email__icontains=params.get("email"))
context = {}
queryset_open = queryset.exclude(status="closed")
results_leads_open = self.paginate_queryset(
queryset_open.distinct(), self.request, view=self
)
open_leads = LeadSerializer(results_leads_open, many=True).data
if results_leads_open:
offset = queryset_open.filter(
id__gte=results_leads_open[-1].id).count()
if offset == queryset_open.count():
offset = None
else:
offset = 0
context["per_page"] = 10
context["open_leads"] = {
"leads_count": self.count,
"open_leads": open_leads,
"offset": offset
}
queryset_close = queryset.filter(status="closed")
results_leads_close = self.paginate_queryset(
queryset_close.distinct(), self.request, view=self
)
close_leads = LeadSerializer(results_leads_close, many=True).data
if results_leads_close:
offset = queryset_close.filter(
id__gte=results_leads_close[-1].id).count()
if offset == queryset_close.count():
offset = None
else:
offset = 0
context["close_leads"] = {
"leads_count": self.count,
"close_leads": close_leads,
"offset": offset
}
contacts = Contact.objects.filter(org=self.request.org).values(
"id",
"first_name"
)
context["contacts"] = contacts
context["status"] = LEAD_STATUS
context["source"] = LEAD_SOURCE
context["companies"] = CompanySerializer(
Company.objects.filter(org=self.request.org), many=True).data
context["countries"] = COUNTRIES
context["industries"] = INDCHOICES
return context
@swagger_auto_schema(
tags=["Leads"], manual_parameters=swagger_params.lead_list_get_params
)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return Response(context)
@swagger_auto_schema(
tags=["Leads"], manual_parameters=swagger_params.lead_create_post_params
)
def post(self, request, *args, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
serializer = LeadCreateSerializer(data=params, request_obj=request)
if serializer.is_valid():
lead_obj = serializer.save(
created_by=request.profile, org=request.org)
if params.get("tags"):
tags = json.loads(params.get("tags"))
for t in tags:
tag = Tags.objects.filter(slug=t.lower())
if tag.exists():
tag = tag[0]
else:
tag = Tags.objects.create(name=t)
lead_obj.tags.add(tag)
if params.get("contacts"):
obj_contact = Contact.objects.filter(
id=params.get("contacts"), org=request.org)
lead_obj.contacts.add(obj_contact)
recipients = list(
lead_obj.assigned_to.all().values_list("id", flat=True))
send_email_to_assigned_user.delay(
recipients,
lead_obj.id,
)
if request.FILES.get("lead_attachment"):
attachment = Attachments()
attachment.created_by = request.profile
attachment.file_name = request.FILES.get(
"lead_attachment").name
attachment.lead = lead_obj
attachment.attachment = request.FILES.get("lead_attachment")
attachment.save()
if params.get("teams"):
teams_list = json.loads(params.get("teams"))
teams = Teams.objects.filter(
id__in=teams_list, org=request.org)
lead_obj.teams.add(*teams)
if params.get("assigned_to"):
assinged_to_list = json.loads(
params.get("assigned_to"))
profiles = Profile.objects.filter(
id__in=assinged_to_list, org=request.org)
lead_obj.assigned_to.add(*profiles)
if params.get("status") == "converted":
account_object = Account.objects.create(
created_by=request.profile,
name=lead_obj.account_name,
email=lead_obj.email,
phone=lead_obj.phone,
description=params.get("description"),
website=params.get("website"),
org=request.org
)
account |
rhyolight/nupic.son | app/soc/mapreduce/publish_gci_tasks.py | Python | apache-2.0 | 1,249 | 0.005604 | # Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surveys model updating MapReduce."""
from mapreduce import context
from mapreduce import operation
from soc.modules.gci.models import task as task_model
from soc.modules.gci.models.program import GCIProgram
def process(task):
    """Map handler: re-open a GCI task that is currently unavailable.

    Puts the task back into the OPEN state when it belongs to the program
    named in the mapper params and its status is one of the UNAVAILABLE
    states.  Yields datastore Put and counter-increment operations.
    """
    ctx = context.get()
    params = ctx.mapreduce_spec.mapper.params

    program_key = params['program_key']
    program = GCIProgram.get_by_key_name(program_key)

    if (task.program.key() == program.key() and
            task.status in task_model.UNAVAILABLE):
        task.status = task_model.OPEN
        yield operation.db.Put(task)
        yield operation.counters.Increment("task_updated")
    else:
        # BUG FIX: the original incremented "task_not_updated" for every
        # task, including the ones that were just updated; count only the
        # tasks that were actually left untouched.
        yield operation.counters.Increment("task_not_updated")
|
oemof/reegis-hp | reegis_hp/berlin_hp/create_objects.py | Python | gpl-3.0 | 4,813 | 0 | # -*- coding: utf-8 -*-
# Demandlib
import logging
import oemof.solph as solph
import reegis_hp.berlin_hp.electricity as electricity
import pandas as pd
import demandlib.bdew as bdew
import demandlib.particular_profiles as pprofiles
import reegis_hp.berlin_hp.prepare_data as prepare_data
def heating_systems(esystem, dfull, add_elec, p):
    """Add heat-demand sinks, buses and heating transformers to *esystem*.

    Parameters
    ----------
    esystem : oemof.solph energy system
        System the new buses/sinks/transformers are registered with; its
        ``time_idx`` defines the demand time series length.
    dfull : dict
        Nested dict {heating system: {building type: annual heat demand}}.
    add_elec : dict
        Additional auxiliary energy per heating system and building type;
        only evaluated for residential ('EFH'/'MFH') buildings.
    p : object
        Parameter object providing ``heating2resource`` and ``bdew_types``.

    Side effects: creates solph objects, prints debug output, shows a plot
    and writes the heat-demand frame to ``/home/uwe/hd.csv``.
    """
    power_plants = prepare_data.chp_berlin(p)
    time_index = esystem.time_idx

    # Load the hourly temperature series and repeat the first 24 hours at
    # the end so a leap year (8784 h) can be covered as well.
    temperature_path = '/home/uwe/rli-lokal/git_home/demandlib/examples'
    temperature_file = temperature_path + '/example_data.csv'
    temperature = pd.read_csv(temperature_file)['temperature']
    sli = pd.Series(list(temperature.loc[:23]), index=list(range(8760, 8784)))
    temperature = temperature.append(sli)
    temperature = temperature.iloc[0:len(time_index)]

    heatbus = dict()
    hd = dict()
    auxiliary_energy = 0
    print(dfull)
    for h in dfull.keys():
        hd.setdefault(h, 0)
        lsink = 'demand_{0}'.format(h)
        lbus = 'bus_{0}'.format(h)
        ltransf = '{0}'.format(h)
        lres_bus = 'bus_{0}'.format(p.heating2resource[h])
        for b in dfull[h].keys():
            if b.upper() in p.bdew_types:
                # Residential building types use building class 1.
                bc = 0
                if b.upper() in ['EFH', 'MFH']:
                    bc = 1
                hd[h] += bdew.HeatBuilding(
                    time_index, temperature=temperature, shlp_type=b,
                    building_class=bc, wind_class=1,
                    annual_heat_demand=dfull[h][b], name=h
                ).get_bdew_profile()
                if b.upper() in ['EFH', 'MFH']:
                    print(h, 'in')
                    # Auxiliary electricity follows the same BDEW heat
                    # profile shape, scaled by the auxiliary demand.
                    auxiliary_energy += bdew.HeatBuilding(
                        time_index, temperature=temperature, shlp_type=b,
                        building_class=bc, wind_class=1,
                        annual_heat_demand=add_elec[h][b], name=h
                    ).get_bdew_profile()
            elif b in ['i', ]:
                # Industrial demand uses a simple step profile.
                hd[h] += pprofiles.IndustrialLoadProfile(
                    time_index).simple_profile(annual_demand=dfull[h][b])
            else:
                logging.error('Demandlib typ "{0}" not found.'.format(b))
        heatbus[h] = solph.Bus(label=lbus)
        solph.Sink(label=lsink, inputs={heatbus[h]: solph.Flow(
            actual_value=hd[h].div(hd[h].max()), fixed=True,
            nominal_value=hd[h].max())})
        if 'district' not in h:
            # Decentral heating: one transformer from the resource bus.
            if lres_bus not in esystem.groups:
                solph.Bus(label=lres_bus)
            solph.LinearTransformer(
                label=ltransf,
                inputs={esystem.groups[lres_bus]: solph.Flow()},
                outputs={heatbus[h]: solph.Flow(
                    nominal_value=hd[h].max(),
                    variable_costs=0)},
                conversion_factors={heatbus[h]: 1})
        else:
            # District heating: one CHP transformer per power plant that
            # feeds both the electricity bus and the heat bus.
            for pp in power_plants[h].index:
                lres_bus = 'bus_' + pp
                if lres_bus not in esystem.groups:
                    solph.Bus(label=lres_bus)
                solph.LinearTransformer(
                    label='pp_chp_{0}_{1}'.format(h, pp),
                    inputs={esystem.groups[lres_bus]: solph.Flow()},
                    outputs={
                        esystem.groups['bus_el']: solph.Flow(
                            nominal_value=power_plants[h]['power_el'][pp]),
                        heatbus[h]: solph.Flow(
                            nominal_value=power_plants[h]['power_th'][pp])},
                    conversion_factors={esystem.groups['bus_el']: 0.3,
                                        heatbus[h]: 0.4})
    # Debug output of the assembled heat demand profiles.
    from matplotlib import pyplot as plt
    hd_df = pd.DataFrame(hd)
    print(hd_df.sum().sum())
    print('z_max:', hd_df['district_z'].max())
    print('dz_max:', hd_df['district_dz'].max())
    print('z_sum:', hd_df['district_z'].sum())
    print('dz_sum:', hd_df['district_dz'].sum())
    hd_df.plot(colormap='Spectral')
    hd_df.to_csv('/home/uwe/hd.csv')
    plt.show()

    # Sink for the auxiliary (heating-related) electricity demand.
    solph.Sink(label='auxiliary_energy',
               inputs={esystem.groups['bus_el']: solph.Flow(
                   actual_value=auxiliary_energy.div(auxiliary_energy.max()),
                   fixed=True, nominal_value=auxiliary_energy.max())})

    # Electricity demand sink, reduced by the auxiliary heating energy.
    electricity_usage = electricity.DemandElec(time_index)
    normalised_demand, max_demand = electricity_usage.solph_sink(
        resample='H', reduce=auxiliary_energy)
    sum_demand = normalised_demand.sum() * max_demand
    print("delec:", "{:.2E}".format(sum_demand))
    solph.Sink(label='elec_demand',
               inputs={esystem.groups['bus_el']: solph.Flow(
                   actual_value=normalised_demand, fixed=True,
                   nominal_value=max_demand)})
|
taigaio/taiga-back | taiga/events/management/commands/emit_notification_message.py | Python | agpl-3.0 | 1,345 | 0.001487 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from taiga.events.events import emit_event
class Command(BaseCommand):
    """Management command that broadcasts a notification to current users."""

    help = 'Send a notification message to the current users'

    def add_arguments(self, parser):
        parser.add_argument("title", help="The title of the message.")
        parser.add_argument("description", help="The description of the message.")

    def handle(self, **options):
        data = {
            "title": options["title"],
            "desc": options["description"],
        }
        routing_key = "notifications"
        # on_commit=False: emit immediately instead of after a DB commit.
        emit_event(data, routing_key, on_commit=False)
|
janrygl/gensim | gensim/test/test_count_minimal_sketch_counter.py | Python | gpl-3.0 | 3,092 | 0.004204 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import sys
import random
import itertools
from gensim.models.phrases import Phrases
# On Python 2 `unicode` exists natively; alias it to `str` on Python 3.
if sys.version_info[0] >= 3:
    unicode = str

# Needed because sample data files are located in the same folder.
module_path = os.path.dirname(__file__)


def datapath(fname):
    """Return the path of *fname* inside the local test_data folder."""
    return os.path.join(module_path, 'test_data', fname)


# Selected words used to build deterministic phrase sentences.
WORDS = ['PHRASE%i' % i for i in range(10)]
class TestPhrasesModel(unittest.TestCase):
@staticmethod
def get_word():
"""Generate random word from letters A-Z."""
word_len = random.randint(1, 12)
return ''.join(chr(random.randint(65, 80)) for i in range(word_len))
@staticmethod
def get_sentence(size=10000):
"""Generator for random sentences.
10% probability to return sentence containing only preselected words"""
for i in range(size):
if random.random() > 0.9:
yield [WORDS[random.randint(0, len(WORDS) -1)] for i in range(random.randint(2, 10))] + ["."]
else:
yield [TestPhrasesModel.get_word() for i in range(random.randint(2, 10))] + ["."]
def testUpdate(self):
"""Test adding one token.
"""
special_token = 'non_present_token'
phrases = Phrases(TestPhrasesModel.get_sentence(), min_count=1)
present = special_token in phrases.vocab
freq = phrases.vocab[special_token]
phrases.add_vocab([[special_token]])
freq_after_change = phrases.vocab[special_token]
present_after_change = special_token in phrases.vocab
self.assertEqual(present, False, msg="Non-present token is marked as present.")
self.assertEqual(present_after_change, True, msg="Present token is marked as non-present.")
self.assertEqual(freq, 0, msg="Predicted non-zero freq for non-present token.")
self.assertEqual(freq_after_change, 1, msg="Predicted non 1 freq for token inserted once.")
def testFreqCount(self):
"""Test adding one token.
"""
speci | al_token = 'non_present_token'
phrases = Phrases(None, min_count=1)
current = iter([])
for i in range(100):
current = itertools.chain(current, iter([[special_token]]), TestPhrasesModel.get_sentence(i))
phrases.add_vocab(current)
freq = phrases.vocab[special_token]
self.assertTrue(freq >= 100)
current = iter([])
| for i in range(100):
current = itertools.chain(current, iter([[special_token]]), TestPhrasesModel.get_sentence(i))
phrases.add_vocab(current)
freq = phrases.vocab[special_token]
self.assertTrue(freq >= 200)
#endclass TestPhrasesModel
if __name__ == '__main__':
    # Verbose logging helps when the randomised tests fail.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
|
xuhdev/nikola | tests/test_utils.py | Python | mit | 15,927 | 0.000753 | # -*- coding: utf-8 -*-
import unittest
import mock
import os
import lxml.html
from nikola import metadata_extractors
from nikola.post import get_meta
from nikola.utils import (
demote_headers, TranslatableSetting, get_crumbs, TemplateHookRegistry,
get_asset_path, get_theme_chain, get_translation_candidate, write_metadata)
from nikola.plugins.task.sitemap import get_base_path as sitemap_get_base_path
class dummy(object):
    """Minimal stand-in for a Nikola post/site object used by get_meta tests."""

    default_lang = 'en'
    # Registry of metadata extractors, as a real site object would carry.
    metadata_extractors_by = metadata_extractors.default_metadata_extractors_by()
    config = {'TRANSLATIONS_PATTERN': '{path}.{lang}.{ext}',
              'TRANSLATIONS': {'en': './'},
              'DEFAULT_LANG': 'en'}

    def __init__(self):
        metadata_extractors.load_defaults(self, self.metadata_extractors_by)
class GetMetaTest(unittest.TestCase):
def test_getting_metadata_from_content(self):
file_metadata = ".. title: Nikola needs more tests!\n"\
".. slug: write-tests-now\n"\
".. date: 2012/09/15 19:52:05\n"\
".. tags:\n"\
".. link:\n"\
".. description:\n\n"\
"Post content\n"
opener_mock = mock.mock_open(read_data=file_metadata)
post = dummy()
post.source_path = 'file_with_metadata'
post.metadata_path = 'file_with_metadata.meta'
with mock.patch('nikola.post.io.open', opener_mock, create=True):
meta = get_meta(post, None)[0]
self.assertEqual('Nikola needs more tests!', meta['title'])
self.assertEqual('write-tests-now', meta['slug'])
self.assertEqual('2012/09/15 19:52:05', meta['date'])
self.assertFalse('tags' in meta)
self.assertFalse('link' in meta)
self.assertFalse('description' in meta)
def test_get_title_from_fname(self):
file_metadata = ".. slug: write-tests-now\n"\
".. date: 2012/09/15 19:52:05\n"\
".. tags:\n"\
".. link:\n"\
".. description:\n"
opener_mock = mock.mock_open(read_data=file_metadata)
post = dummy()
post.source_path = 'file_with_metadata'
post.metadata_path = 'file_with_metadata.meta'
with mock.patch('nikola.post.io.open', opener_mock, create=True):
meta = get_meta(post, None)[0]
self.assertEqual('file_with_metadata', meta['title'])
self.assertEqual('write-tests-now', meta['slug'])
self.assertEqual('2012/09/15 19:52:05', meta['date'])
self.assertFalse('tags' in meta)
self.assertFalse('link' in meta)
self.assertFalse('description' in meta)
def test_use_filename_as_slug_fallback(self):
file_metadata = ".. title: Nikola needs more tests!\n"\
".. date: 2012/09/15 19:52:05\n"\
".. tags:\n"\
".. link:\n"\
".. description:\n\n"\
"Post content\n"
opener_mock = mock.mock_open(read_data=file_metadata)
post = dummy()
post.source_path = 'Slugify this'
post.metadata_path = 'Slugify this.meta'
with mock.patch('nikola.post.io.open', opener_mock, create=True):
meta = get_meta(post, None)[0]
self.assertEqual('Nikola needs more tests!', meta['title'])
self.assertEqual('slugify-this', meta['slug'])
self.assertEqual('2012/09/15 19:52:05', meta['date'])
self.assertFalse('tags' in meta)
self.assertFalse('link' in meta)
self.assertFalse('description' in meta)
def test_extracting_metadata_from_filename(self):
    """date/slug/title are parsed from the file name via FILE_METADATA_REGEXP."""
    post = dummy()
    post.source_path = '2013-01-23-the_slug-dub_dub_title.md'
    post.metadata_path = '2013-01-23-the_slug-dub_dub_title.meta'
    post.config['FILE_METADATA_REGEXP'] = \
        r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
    # With unslugify enabled the title is prettified; without it, it is raw.
    cases = ((True, 'Dub dub title'), (False, 'dub_dub_title'))
    for unslugify, expected_title in cases:
        post.config['FILE_METADATA_UNSLUGIFY_TITLES'] = unslugify
        with mock.patch('nikola.post.io.open', create=True):
            meta = get_meta(post, None)[0]
        self.assertEqual(expected_title, meta['title'])
        self.assertEqual('the_slug', meta['slug'])
        self.assertEqual('2013-01-23', meta['date'])
def test_get_meta_slug_only_from_filename(self):
    """Even with no metadata at all, the slug comes from the file name."""
    post = dummy()
    post.source_path = 'some/path/the_slug.md'
    post.metadata_path = 'some/path/the_slug.meta'
    with mock.patch('nikola.post.io.open', create=True):
        extracted_slug = get_meta(post, None)[0]['slug']
    self.assertEqual('the_slug', extracted_slug)
class HeaderDemotionTest(unittest.TestCase):
    """Tests for ``demote_headers``, which shifts <h1>-<h6> levels in a tree.

    NOTE(review): the original check methods lacked the ``test_`` prefix,
    so unittest discovery never ran them.  Discoverable ``test_*`` aliases
    are added below while the original names are kept for compatibility.
    """

    def _check_demotion(self, amount, expected_levels):
        """Demote a six-header document by *amount* and compare the result.

        ``expected_levels[i]`` is the header level expected for the element
        whose text is ``header i+1``.
        """
        input_str = ''.join(
            '<h{0}>header {0}</h{0}>\n'.format(i) for i in range(1, 7))
        expected_output = ''.join(
            '<h{0}>header {1}</h{0}>\n'.format(level, i)
            for i, level in enumerate(expected_levels, start=1))
        doc = lxml.html.fromstring(input_str)
        outdoc = lxml.html.fromstring(expected_output)
        demote_headers(doc, amount)
        # assertEqual (assertEquals is a deprecated alias) on the serialized
        # trees compares structure and text in one shot.
        self.assertEqual(lxml.html.tostring(outdoc), lxml.html.tostring(doc))

    def demote_by_zero(self):
        # Demoting by zero is a no-op.
        self._check_demotion(0, [1, 2, 3, 4, 5, 6])

    def demote_by_one(self):
        # Every level shifts down by one; <h6> saturates at 6.
        self._check_demotion(1, [2, 3, 4, 5, 6, 6])

    def demote_by_two(self):
        # Shift by two; the bottom levels saturate at <h6>.
        self._check_demotion(2, [3, 4, 5, 6, 6, 6])

    def demote_by_minus_one(self):
        # A negative amount promotes headers; <h1> saturates at 1.
        self._check_demotion(-1, [1, 1, 2, 3, 4, 5])

    # unittest only collects methods named ``test_*``; these aliases make
    # the checks actually run without renaming the original methods.
    test_demote_by_zero = demote_by_zero
    test_demote_by_one = demote_by_one
    test_demote_by_two = demote_by_two
    test_demote_by_minus_one = demote_by_minus_one
class TranslatableSettingsTest(unittest.TestCase):
"""Tests for translatable settings."""
def test_string_input(self):
    """A plain-string setting yields the same value for every language."""
    inp = 'Fancy Blog'
    S = TranslatableSetting('S', inp, {'xx': ''})
    S.default_lang = 'xx'
    S.lang = 'xx'
    try:
        u = unicode(S)
    except NameError:  # Python 3: ``unicode`` does not exist
        u = str(S)
    # Every access path returns the raw string: str()/unicode(), a call
    # with no language, a real language, and an unknown language.
    for candidate in (u, S(), S('xx'), S('zz')):
        self.assertEqual(inp, candidate)
    # The language attributes are untouched by the lookups above.
    self.assertEqual(S.lang, 'xx')
    self.assertEqual(S.default_lang, 'xx')
def test_dict_input(self):
"""Tests f |
SteffenDE/monitornjus-classic | admin/index.py | Python | mit | 15,077 | 0.022087 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 Steffen Deusch
# Licensed under the MIT license
# Supplement to MonitorNjus, 14.09.2015 (version 0.9.3)
#
# CGI admin front-end: builds the configuration form for the display sets.
# Python 2 only -- relies on ``reload(sys)``/``setdefaultencoding`` and the
# ``imp`` module, none of which work the same way on Python 3.
import os
# Directory this script lives in; used to locate the sibling modules dir.
workingdir = os.path.dirname(os.path.realpath(__file__))
import sys
reload(sys)
# Force UTF-8 as the process-wide default encoding (Python 2 idiom).
sys.setdefaultencoding('utf-8')
import cgi
import imp
modulesdir = workingdir+"/../modules"
# Load the project's shared helper modules directly from their source files.
common = imp.load_source("common", modulesdir+"/common.py")
colors = imp.load_source("colors", modulesdir+"/colors.py")
try:
if common.authentication:
auth = imp.load_source("auth", modulesdir+"/auth.py")
auth.me()
rows = int(common.getrows())
rowsone = rows + 1
def displaysets():
x = 1
out = ""
while x <= rows:
if unicode(x) in common.getallrows():
out += u"""\
<div class="col s12">
<h5 class="header center """+colors.color+"""-text">Displayset """+unicode(x)+"""</h5>
<div class="row">
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Linke Seite</span><br>
<div class="row">
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("URL", "Links", x)))+"""\" name="URL-Links-"""+unicode(x)+"""\" id="URL-Links-"""+unicode(x)+"""\" type="text">
<label for="URL-Links-"""+unicode(x)+"""\">URL Links</label>
</div>
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "Links", x)))+"""\" name="REFRESH-Links-"""+unicode(x)+"""\" id="REFRESH-Links-"""+unicode(x)+"""\" type="number">
<label for=" | REFRESH-Links-"""+unicode(x)+"""\">Refresh Links</label>
</div>
</div>
<div>
<input type="checkbox" n | ame="AKTIV-Links-"""+unicode(x)+"""\" id="AKTIV-Links-"""+unicode(x)+"""\" """+common.aktiv("AKTIV", "Links", x)+"""/>
<label for="AKTIV-Links-"""+unicode(x)+"""\">Links aktiviert</label>
<input type="hidden" value="0" name="HIDDEN.AKTIV-Links-"""+unicode(x)+"""\">
<input type="checkbox" name="REFRESHAKTIV-Links-"""+unicode(x)+"""\" id="REFRESHAKTIV-Links-"""+unicode(x)+"""\" """+common.aktiv("REFRESHAKTIV", "Links", x)+"""/>
<label for="REFRESHAKTIV-Links-"""+unicode(x)+"""\">Links neu laden</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-Links-"""+unicode(x)+"""\">
</div>
<div class="row">
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("uhrzeit", "Links", x)))+"""\" name="uhrzeit-Links-"""+unicode(x)+"""\" id="uhrzeit-Links-"""+unicode(x)+"""\" type="text">
<label for="uhrzeit-Links-"""+unicode(x)+"""\">Uhrzeit</label>
</div>
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("wochentag", "Links", x)))+"""\" name="wochentag-Links-"""+unicode(x)+"""\" id="wochentag-Links-"""+unicode(x)+"""\" type="text">
<label for="wochentag-Links-"""+unicode(x)+"""\">Wochentag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("tag", "Links", x)))+"""\" name="tag-Links-"""+unicode(x)+"""\" id="tag-Links-"""+unicode(x)+"""\" type="text">
<label for="tag-Links-"""+unicode(x)+"""\">Tag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("monat", "Links", x)))+"""\" name="monat-Links-"""+unicode(x)+"""\" id="monat-Links-"""+unicode(x)+"""\" type="text">
<label for="monat-Links-"""+unicode(x)+"""\">Monat</label>
</div>
</div>
<div class="row">
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINLEFT","Links",x)))+"""\" name="MARGINLEFT-Links-"""+unicode(x)+"""\" id="MARGINLEFT-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINLEFT-Links-"""+unicode(x)+"""\">Rand-Links</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINRIGHT","Links",x)))+"""\" name="MARGINRIGHT-Links-"""+unicode(x)+"""\" id="MARGINRIGHT-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINRIGHT-Links-"""+unicode(x)+"""\">Rand-Rechts</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINTOP","Links",x)))+"""\" name="MARGINTOP-Links-"""+unicode(x)+"""\" id="MARGINTOP-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINTOP-Links-"""+unicode(x)+"""\">Rand-Oben</label>
</div>
<div class="input-field col s3">
<input value=\""""+cgi.escape(unicode(common.getinfo("MARGINBOTTOM","Links",x)))+"""\" name="MARGINBOTTOM-Links-"""+unicode(x)+"""\" id="MARGINBOTTOM-Links-"""+unicode(x)+"""\" type="text">
<label for="MARGINBOTTOM-Links-"""+unicode(x)+"""\">Rand-Unten</label>
</div>
</div>
</div>
</div>
</div>
<div class="col s6">
<div class="card white darken-1">
<div class="card-content white-text">
<span class="card-title """+colors.color+"""-text text-darken-2">Rechte Seite</span><br>
<div class="row">
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("URL", "Rechts", x)))+"""\" name="URL-Rechts-"""+unicode(x)+"""\" id="URL-Rechts-"""+unicode(x)+"""\" type="text">
<label for="URL-Rechts-"""+unicode(x)+"""\">URL Rechts</label>
</div>
<div class="input-field col s6">
<input value=\""""+cgi.escape(unicode(common.testexist("REFRESH", "Rechts", x)))+"""\" name="REFRESH-Rechts-"""+unicode(x)+"""\" id="REFRESH-Rechts-"""+unicode(x)+"""\" type="number">
<label for="REFRESH-Rechts-"""+unicode(x)+"""\">Refresh Rechts</label>
</div>
</div>
<div>
<input type="checkbox" name="AKTIV-Rechts-"""+unicode(x)+"""\" id="AKTIV-Rechts-"""+unicode(x)+"""\" """+common.aktiv("AKTIV", "Rechts", x)+"""/>
<label for="AKTIV-Rechts-"""+unicode(x)+"""\">Rechts aktiviert</label>
<input type="hidden" value="0" name="HIDDEN.AKTIV-Rechts-"""+unicode(x)+"""\">
<input type="checkbox" name="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\" id="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\" """+common.aktiv("REFRESHAKTIV", "Rechts", x)+"""/>
<label for="REFRESHAKTIV-Rechts-"""+unicode(x)+"""\">Rechts neu laden</label>
<input type="hidden" value="0" name="HIDDEN.REFRESHAKTIV-Rechts-"""+unicode(x)+"""\">
</div>
<div class="row">
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("uhrzeit", "Rechts", x)))+"""\" name="uhrzeit-Rechts-"""+unicode(x)+"""\" id="uhrzeit-Rechts-"""+unicode(x)+"""\" type="text">
<label for="uhrzeit-Rechts-"""+unicode(x)+"""\">Uhrzeit</label>
</div>
<div class="input-field col s4">
<input value=\""""+cgi.escape(unicode(common.getdate("wochentag", "Rechts", x)))+"""\" name="wochentag-Rechts-"""+unicode(x)+"""\" id="wochentag-Rechts-"""+unicode(x)+"""\" type="text">
<label for="wochentag-Rechts-"""+unicode(x)+"""\">Wochentag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("tag", "Rechts", x)))+"""\" name="tag-Rechts-"""+unicode(x)+"""\" id="tag-Rechts-"""+unicode(x)+"""\" type="text">
<label for="tag-Rechts-"""+unicode(x)+"""\">Tag</label>
</div>
<div class="input-field col s2">
<input value=\""""+cgi.escape(unicode(common.getdate("monat", "Rechts", x)))+"""\" name="monat-Rechts-"""+unicode(x)+"""\" id="m |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.