repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
bmilde/ambientsearch | python/wiki_search_es.py | Python | apache-2.0 | 6,033 | 0.010774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jonas Wacker, Benjamin Milde'
from elasticsearch import Elasticsearch
import nltk
import traceback
import nltk.data
wiki_index = 'simple_en_wiki'
default_type = 'page'
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
# Currently using a public server. Configure this to your own server.
es = Elasticsearch([
{'host': 'wiki.machinelearning.online', 'port': 80}
#{'host': 'localhost', 'port': 9200}
], send_get_body_as='POST')
# Build a full ES query with filters (to remove redirect and special pages)
# and matching typical wiki fields (["text", "title", "category"]), for a simple query string
# e.g. es_full_query_with_wiki_filters(query="woman~ spiritual~ 'mutual aid'~ kind~ religion~ mormon~ mormonism~ religious~")
def es_full_query_with_wiki_filters(query,minimum_should_match_percent=25):
query = {
"query": {
"filtered": {
"query": {
"query_string": {
"fields": ["text", "title", "category"],
"query": query,
"minimum_should_match": str(minimum_should_match_percent)+"%",
"use_dis_max": "false",
"phrase_slop": "0"
}
},
"filter": {
"bool": {
"must": [
{"term": {
"special": "false"
}},
{"term": {
"redirect": "false"
}}
],
"must_not": [
{"query": {"match": {
"text": "#redirect"
}}}
]
}
}
}
}
}
return query
# Expects a set of keywords along with their scores (Tuples).
# Returns an es-compatible query string (see example query field above).
# Scores boost each keyword along with their scores, fuzziness also matches different spellings.
# Recommendation: Scores - yes, fuzziness - yes, Multiword - yes
def construct_query_string(keywords, scores=True, fuzziness=False, multiword=True, round_numbers=True):
keyword_strings = []
for keyword, score in keywords:
keyword = keyword.replace('_', ' ')
# Phrases need to be in quotation marks
if multiword and len(keyword.split()) > 1:
keyword = "\"" + keyword + "\""
# Fuzziness rather causes problems (~)
if fuzziness:
keyword += "~"
if scores:
keyword += "^" + ("%.5f" % score)
keyword_strings.append(keyword)
return " ".join(keyword_strings)
#Adapted from http://stackoverflow.com/questions/14596884/remove-text-between-and-in-python
def clean_wiki_brackets(text):
ret = ''
skip1c = 0
skip2c = 0
skip3c = 0
skip4c = 0
for i in text:
if i == '[':
skip1c += 1
elif i == '(':
skip2c += 1
elif i == '{':
skip3c += 1
elif i == '|':
skip4c += 1
elif i == ']' and skip1c > 0:
skip1c -= 1
elif i == ')'and sk | ip2c > 0:
skip2c -= 1
elif i == '}'and skip3c > 0:
skip3c -= 1
elif | i == '|'and skip4c > 0:
skip4c -= 1
elif skip1c == 0 and skip2c == 0 and skip3c == 0 and skip4c == 0:
ret += i
return ret
# Extracts the first n words from the given text.
def get_summary_from_text(text, n=50):
#print '-> fulltext:',text[:500]
text = clean_wiki_brackets(text)
# This is actually specific to stream2es, which sometimes returns broken wiki text.
# This is an attempt to fix it.
if ']]' in text:
text = ''.join(text.split(']]')[1:])
#print '-> fulltext cleaned:',text[:500]
sents = sent_detector.tokenize(text)
summary = ''
i = 0
while(len(summary) < n and i < len(sents)):
summary += sents[i] + ' '
i += 1
#print 'summary:', summary
return summary
# Expects a set of keywords along with their scores (Tuples).
# Extracts the n best scoring article results from elasticsearch. Use n=-1 if you want all articles returned.
def extract_best_articles(keywords, n=10, minimum_should_match_percent=25, min_summary_chars=50):
simple_query_string = construct_query_string(keywords)
print 'wiki search query:',simple_query_string
query = es_full_query_with_wiki_filters(simple_query_string,minimum_should_match_percent)
summary_box_infos = []
try:
results = es.search(index=wiki_index, doc_type=default_type, body=query)
except Exception as ex:
traceback.print_exc()
summary_box_info = [{'title': 'Elasticsearch Error',
'text': 'Unable to connect to elasticsearch server. Server running?',
'url': 'https://www.elastic.co/',
'categories': [],
'score': 10}]
return summary_box_info
for result in results['hits']['hits']:
title = result['_source']['title']
full_text = result['_source']['text']
categories = result['_source']['category']
score = result['_score']
summary = get_summary_from_text(full_text,min_summary_chars)
url = 'https://simple.wikipedia.org/w/index.php?title='+title.replace(' ', '_')
#print 'wiki search: found',title,'with score',score
summary_box_infos.append({
'title': title,
'text': summary,
'url': url,
'categories': categories,
'score': score
})
summary_box_infos = sorted(summary_box_infos, key=lambda x: x['score'], reverse=True)
if n != -1:
summary_box_infos = summary_box_infos[:n]
return summary_box_infos
if __name__ == "__main__":
print(extract_best_articles([("climate change",2.0), ("global warming",2.0)]))
|
Finntack/pootle | tests/pootle_fs/fs_response.py | Python | gpl-3.0 | 6,794 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from pootle.core.state import ItemState, State
from pootle_fs.models import StoreFS
from pootle_fs.response import (
FS_RESPONSE, ProjectFSItemResponse, ProjectFSResponse)
from pootle_project.models import Project
from pootle_store.models import Store
class DummyContext(object):
def __str__(self):
return "<DummyContext object>"
class DummyFSItemState(ItemState):
@property
def pootle_path(self):
if "pootle_path" in self.kwargs:
return self.kwargs["pootle_path"]
elif self.store_fs:
return self.store_fs.pootle_path
elif self.store:
return self.store.pootle_path
@property
def fs_path(self):
if "fs_path" in self.kwargs:
return self.kwargs["fs_path"]
elif self.store_fs:
return self.store_fs.path
@property
def store(self):
if "store" in self.kwargs:
return self.kwargs["store"]
elif self.store_fs:
return self.store_fs.store
@property
def store_fs(self):
return self.kwargs.get("store_fs")
class DummyFSState(State):
"""The pootle_fs State can create ItemStates with
- a store_fs (that has a store)
- a store_fs (that has no store)
- a store and an fs_path
- a pootle_path and an fs_path
"""
item_state_class = DummyFSItemState
def state_fs_staged(self, **kwargs):
for store_fs in kwargs.get("fs_staged", []):
yield dict(store_fs=store_fs)
def state_fs_ahead(self, **kwargs):
for store_fs in kwargs.get("fs_ahead", []):
yield dict(store_fs=store_fs)
def state_fs_untracked(self, **kwargs):
for fs_path, pootle_path in kwargs.get("fs_untracked", []):
yield dict(fs_path=fs_path, pootle_path=pootle_path)
def state_pootle_untracked(self, **kwargs):
for fs_path, store in kwargs.get("pootle_untracked", []):
yield dict(fs_path=fs_path, store=store)
@pytest.mark.django_db
def test_fs_response_instance():
context = DummyContext()
resp = ProjectFSResponse(context)
asse | rt resp.context == context
assert resp.response_types == FS_RESPONSE.keys()
assert resp.has_failed is False
assert resp.made_changes is False
assert list(resp.failed()) == []
assert list(resp. | completed()) == []
assert str(resp) == (
"<ProjectFSResponse(<DummyContext object>): No changes made>")
assert list(resp) == []
with pytest.raises(KeyError):
resp["DOES_NOT_EXIST"]
def _test_item(item, item_state):
assert isinstance(item, ProjectFSItemResponse)
assert item.kwargs["fs_state"] == item_state
assert item.fs_state == item_state
assert item.failed is False
assert item.fs_path == item.fs_state.fs_path
assert item.pootle_path == item.fs_state.pootle_path
assert item.store_fs == item.fs_state.store_fs
assert item.store == item.fs_state.store
assert (
str(item)
== ("<ProjectFSItemResponse(<DummyContext object>): %s "
"%s::%s>" % (item.action_type, item.pootle_path, item.fs_path)))
def _test_fs_response(expected=2, **kwargs):
action_type = kwargs.pop("action_type")
state_type = kwargs.pop("state_type")
resp = ProjectFSResponse(DummyContext())
state = DummyFSState(DummyContext(), **kwargs)
for fs_state in state[state_type]:
resp.add(action_type, fs_state=fs_state)
assert resp.has_failed is False
assert resp.made_changes is True
assert resp.response_types == FS_RESPONSE.keys()
assert len(list(resp.completed())) == 2
assert list(resp.failed()) == []
assert action_type in resp
assert str(resp) == (
"<ProjectFSResponse(<DummyContext object>): %s: %s>"
% (action_type, expected))
for i, item in enumerate(resp[action_type]):
_test_item(item, state[state_type][i])
@pytest.mark.django_db
def test_fs_response_path_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_untracked = []
for i in range(0, 2):
fs_untracked.append(
("/some/fs/fs_untracked_%s.po" % i,
"/language0/%s/fs_untracked_%s.po" % (project.code, i)))
_test_fs_response(
fs_untracked=fs_untracked,
action_type="fetched_from_fs",
state_type="fs_untracked")
@pytest.mark.django_db
def test_fs_response_store_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
pootle_untracked = []
for i in range(0, 2):
pootle_untracked.append(
("/some/fs/pootle_untracked_%s.po" % i,
Store.objects.create_by_path(
"/language0/%s/pootle_untracked_%s.po" % (project.code, i))))
_test_fs_response(
pootle_untracked=pootle_untracked,
action_type="added_from_pootle",
state_type="pootle_untracked")
@pytest.mark.django_db
def test_fs_response_store_fs_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_ahead = []
for i in range(0, 2):
pootle_path = "/language0/%s/fs_ahead_%s.po" % (project.code, i)
fs_path = "/some/fs/fs_ahead_%s.po" % i
fs_ahead.append(
StoreFS.objects.create(
store=Store.objects.create_by_path(pootle_path),
path=fs_path))
_test_fs_response(
fs_ahead=fs_ahead,
action_type="pulled_to_pootle",
state_type="fs_ahead")
@pytest.mark.django_db
def test_fs_response_store_fs_no_store_items(settings, tmpdir):
pootle_fs_path = os.path.join(str(tmpdir), "fs_response_test")
settings.POOTLE_FS_PATH = pootle_fs_path
project = Project.objects.get(code="project0")
fs_staged = []
for i in range(0, 2):
pootle_path = "/language0/%s/fs_staged_%s.po" % (project.code, i)
fs_path = "/some/fs/fs_staged_%s.po" % i
fs_staged.append(
StoreFS.objects.create(
pootle_path=pootle_path,
path=fs_path))
_test_fs_response(
fs_staged=fs_staged,
action_type="pulled_to_pootle",
state_type="fs_staged")
|
collaj/MusicServer | scripts/test_script_delete.py | Python | agpl-3.0 | 404 | 0.00995 | import os
import time
import sys
FOLD | ERPATH = sys.argv[1]
#os.chdir(FOLDERPATH)
walk = os.walk(FOLDERPATH)
FSEVENT = "delete"
for item in walk:
FILEPATHPREFIX = item[0] + "\\"
for song in item[2]:
if song | .endswith(".mp3"):
FILEPATH = "%s%s" % (FILEPATHPREFIX, song)
os.system('python script.py "' + song + '" "' + FILEPATH + '" "' + FSEVENT + '"') |
proversity-org/problem-builder | problem_builder/step.py | Python | agpl-3.0 | 9,949 | 0.002714 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015 Harvard, edX & OpenCraft
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
import logging
from lazy.lazy import lazy
from xblock.core import XBlock
from xblock.fields import String, List, Scope
from xblock.fragment import Fragment
from xblockutils.resources import ResourceLoader
from xblockutils.studio_editable import (
NestedXBlockSpec, StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin, XBlockWithPreviewMixin
)
from problem_builder.answer import AnswerBlock, AnswerRecapBlock
from problem_builder.mcq import MCQBlock, RatingBlock
from problem_builder.mixins import EnumerableChildMixin, StepParentMixin
from problem_builder.mrq import MRQBlock
from problem_builder.plot import PlotBlock
from problem_builder.slider import SliderBlock
from problem_builder.table import MentoringTableBlock
log = logging.getLogger(__name__)
loader = ResourceLoader(__name__)
# Make '_' a no-op so we can scrape strings
def _(text):
return text
def _normalize_id(key):
"""
Helper method to normalize a key to avoid issues where some keys have version/branch and others don't.
e.g. self.scope_ids.usage_id != self.runtime.get_block(self.scope_ids.usage_id).scope_ids.usage_id
"""
if hasattr(key, "for_branch"):
key = key.for_branch(None)
if hasattr(key, "for_version"):
key = key.for_version(None)
return key
class Correctness(object):
CORRECT = 'correct'
PARTIAL = 'partial'
INCORRECT = 'incorrect'
@XBlock.needs('i18n')
class MentoringStepBlock(
StudioEditableXBlockMixin, StudioContainerWithNestedXBlocksMixin, XBlockWithPreviewMixin,
EnumerableChildMixin, StepParentMixin, XBlock
):
"""
An XBlock for a step.
"""
CAPTION = _(u"Step")
STUDIO_LABEL = _(u"Mentoring Step")
CATEGORY = 'sb-step'
# Settings
display_name = String(
| display_name=_("Step Title"),
help=_('Leave blank to use sequential numbering'),
default="",
scope=Scope.content
)
# U | ser state
student_results = List(
# Store results of student choices.
default=[],
scope=Scope.user_state
)
next_button_label = String(
display_name=_("Next Button Label"),
help=_("Customize the text of the 'Next' button."),
default=_("Next Step")
)
message = String(
display_name=_("Message"),
help=_("Feedback or instructional message which pops up after submitting."),
)
editable_fields = ('display_name', 'show_title', 'next_button_label', 'message')
@lazy
def siblings(self):
return self.get_parent().step_ids
@property
def is_last_step(self):
parent = self.get_parent()
return self.step_number == len(parent.step_ids)
@property
def allowed_nested_blocks(self):
"""
Returns a list of allowed nested XBlocks. Each item can be either
* An XBlock class
* A NestedXBlockSpec
If XBlock class is used it is assumed that this XBlock is enabled and allows multiple instances.
NestedXBlockSpec allows explicitly setting disabled/enabled state, disabled reason (if any) and single/multiple
instances
"""
additional_blocks = []
try:
from xmodule.video_module.video_module import VideoDescriptor
additional_blocks.append(NestedXBlockSpec(
VideoDescriptor, category='video', label=_(u"Video")
))
except ImportError:
pass
try:
from imagemodal import ImageModal
additional_blocks.append(NestedXBlockSpec(
ImageModal, category='imagemodal', label=_(u"Image Modal")
))
except ImportError:
pass
return [
NestedXBlockSpec(AnswerBlock, boilerplate='studio_default'),
MCQBlock, RatingBlock, MRQBlock,
NestedXBlockSpec(None, category="html", label=self._("HTML")),
AnswerRecapBlock, MentoringTableBlock, PlotBlock, SliderBlock
] + additional_blocks
@property
def has_question(self):
return any(getattr(child, 'answerable', False) for child in self.steps)
def submit(self, submissions):
""" Handle a student submission. This is called by the parent XBlock. """
log.info(u'Received submissions: {}'.format(submissions))
# Submit child blocks (questions) and gather results
submit_results = []
for child in self.steps:
if child.name and child.name in submissions:
submission = submissions[child.name]
child_result = child.submit(submission)
submit_results.append([child.name, child_result])
child.save()
# Update results stored for this step
self.reset()
for result in submit_results:
self.student_results.append(result)
self.save()
return {
'message': 'Success!',
'step_status': self.answer_status,
'results': submit_results,
}
@XBlock.json_handler
def get_results(self, queries, suffix=''):
results = {}
answers = dict(self.student_results)
for question in self.steps:
previous_results = answers[question.name]
result = question.get_results(previous_results)
results[question.name] = result
# Add 'message' to results? Looks like it's not used on the client ...
return {
'results': results,
'step_status': self.answer_status,
}
def reset(self):
while self.student_results:
self.student_results.pop()
@property
def answer_status(self):
if all(result[1]['status'] == 'correct' for result in self.student_results):
answer_status = Correctness.CORRECT
elif all(result[1]['status'] == 'incorrect' for result in self.student_results):
answer_status = Correctness.INCORRECT
else:
answer_status = Correctness.PARTIAL
return answer_status
def author_edit_view(self, context):
"""
Add some HTML to the author view that allows authors to add child blocks.
"""
local_context = dict(context)
local_context['author_edit_view'] = True
fragment = super(MentoringStepBlock, self).author_edit_view(local_context)
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder.css'))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder-edit.css'))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/problem-builder-tinymce-content.css'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/util.js'))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/container_edit.js'))
fragment.initialize_js('ProblemBuilderContainerEdit')
return fragment
def mentoring_view(self, context=None):
""" Mentoring View """
return self._render_view(context, 'mentoring_view')
def _render_view(self, context, view):
""" Actually renders a view """
rendering_for_studio = False
if context: # Workbench does not provide context |
AleksNeStu/ggrc-core | test/unit/ggrc_workflows/models/test_workflow.py | Python | apache-2.0 | 2,585 | 0.002321 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Unit Tests for Workflow model and WorkflowState mixin
"""
from datetime import date
import unittest
from freezegun import freeze_time
from ggrc_workflows.models import cycle_task_group_object_task as cycle_task
from ggrc_workflows.models import cycle
from ggrc_workflows.models import workflow
class TestWorkflowState(unittest.TestCase):
def test_get_state(self):
scenario_list = [
{
"task_states": ["Assigned", "Assigned", "Assigned"],
"result": "Assigned"
},
{
"task_states": ["InProgress", "Assigned", "Assigned"],
"result": "InProgress"
},
{
"task_states": ["Finished", "Assigned", "Assigned"],
"result": "InProgress"
},
{
"tas | k_states": ["Verified", "Assigned", "Assigned"],
"result": "InProgress"
},
{
"task_states": ["InProgress", "InProgress", "InProgress"],
"result": "InProgress"
},
{
"task_states": ["Finished", "InProgress", "Assigned"],
"result": "InProgress"
} | ,
{
"task_states": ["Finished", "Declined", "Assigned"],
"result": "InProgress"
},
{
"task_states": ["Finished", "Finished", "Finished"],
"result": "Finished"
},
{
"task_states": ["Verified", "Finished", "Finished"],
"result": "Finished"
},
{
"task_states": ["Verified", "Verified", "Verified"],
"result": "Verified"
},
]
for scenario in scenario_list:
tasks_on_object = []
for task_status in scenario["task_states"]:
tasks_on_object.append(
cycle_task.CycleTaskGroupObjectTask(status=task_status),
)
self.assertEqual(scenario["result"], workflow
.WorkflowState._get_state(tasks_on_object))
def test_get_object_state(self):
tasks_on_object = [
cycle_task.CycleTaskGroupObjectTask(
end_date=date(2015, 2, 1),
cycle=cycle.Cycle(is_current=True)
),
cycle_task.CycleTaskGroupObjectTask(
end_date=date(2015, 1, 10),
cycle=cycle.Cycle(is_current=True)
),
]
with freeze_time("2015-02-01 13:39:20"):
self.assertEqual("Overdue", workflow
.WorkflowState.get_object_state(tasks_on_object))
|
rimbalinux/MSISDNArea | docutils/parsers/rst/directives/body.py | Python | bsd-3-clause | 5,963 | 0 | # $Id: body.py 5618 2008-07-28 08:37:32Z strank $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for additional body elements.
See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class BasePseudoSection(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
has_content = True
node_class = None
"""Node class to be used (must be set in subclasses)."""
def run(self):
if not (self.state_machine.match_titles
or isinstance(self.state_machine.node, nodes.sidebar)):
raise self.error('The "%s" directive may not be used within '
'topics or body elements.' % self.name)
self.assert_has_content()
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text, self.lineno)
titles = [nodes.title(title_text, '', *textnodes)]
# Sidebar uses this code.
if 'subtitle' in self.options:
textnodes, more_messages = self.state.inline_text(
self.options['subtitle'], self.lineno)
titles.append(nodes.subtitle(self.options['subtitle'], '',
*textnodes))
messages.extend(more_messages)
text = '\n'.join(self.content)
node = self.node_class(text, *(titles + messages))
node['classes'] += self.options.get('class', [])
if text:
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Topic(BasePseudoSection):
node_class = nodes.topic
class Sidebar(BasePseudoSection):
node_class = nodes.sidebar
option_spec = BasePseudoSection.option_spec.copy()
option_spec['subtitle'] = directives.unchanged_required
def run(self):
if isinstance(self.state_machine.node, nodes.sidebar):
raise self.error('The "%s" directive may not be used within a '
'sidebar element.' % self.name)
return BasePseudoSection.run(self)
class LineBlock(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert_has_content()
block = nodes.line_block(classes=self.options.get('class', []))
node_list = [block]
for line_text in self.content:
text_nodes, messages = self.state.inline_text(
line_text.strip(), self.lineno + self.content_offset)
line = nodes.line(line_text, '', *text_nodes)
if line_text.strip():
line.indent = len(line_text) - len(line_text.lstrip())
block += line
node_list.extend(messages)
self.content_offset += 1
self.state.nest_line_block_lines(block)
return node_list
class ParsedLiteral(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
text_nodes, messages = self.state.inline_text(text, self.lineno)
node = nodes.literal_block(text, '', *text_nodes, **self.options)
node.line = self.content_offset + 1
return [node] + messages
class Rubric(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
def run(self):
set_classes(self.options)
rubric_text = self.arguments[0]
textnodes, messages = self.state.inline_text(rubric_text, self.lineno)
rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)
return [rubric] + messages
class BlockQuote(Directive):
has_content = True
classes = []
def run(self):
self.assert_has_content()
elements = self.state.block_quote(self.content, self.content_offset)
for element i | n elements:
if isinstance(element, nodes.block_quote):
element['classes'] += self.classes
return elements
class Epigraph(BlockQuote):
classes = ['epigraph']
class Highlights(BlockQuote):
classes = ['highlights']
class PullQuote(BlockQuote):
classes = ['pull-quote']
class Compound(Directive):
option_spec = {'class': directives.class_option}
has_content = True
def run(self):
self.assert | _has_content()
text = '\n'.join(self.content)
node = nodes.compound(text)
node['classes'] += self.options.get('class', [])
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class Container(Directive):
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
|
asposecells/Aspose_Cells_Java | Plugins/Aspose-Cells-Java-for-Python/tests/WorkingWithFiles/Excel2PdfConversion/Excel2PdfConversion.py | Python | mit | 668 | 0.007485 | # To change t | his license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
#if __name__ == "__main__":
# print "Hello World"
from WorkingWithFiles import Excel2PdfConversion
import jpype
import os.path
asposeapispath = os.path.join(os.path.abspath("../../../"), "lib/")
dataDir = os.path.join(os.path.abspath("./"), "dat | a/")
print "You need to put your Aspose.Cells for Java APIs .jars in this folder:\n"+asposeapispath
#print dataDir
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
hw = Excel2PdfConversion(dataDir)
hw.main() |
MSLNZ/msl-equipment | msl/examples/equipment/picotech/picoscope/open_unit_async.py | Python | mit | 1,130 | 0.002655 | """
This example opens the connection in async mode (does not work properly in Python 2.7).
"""
import os
import time
from msl.equipment import (
EquipmentRecord,
ConnectionRecord,
Backend,
)
record = EquipmentRecord(
manufacturer='Pico Technology',
model='5244B', # update for your PicoScope
serial='DY135/055', # update for your PicoScope
connection=ConnectionRecord(
backend=Backend.MSL,
address='SDK::ps5000a.dll', # update for your PicoScope
p | roperties={'open_async': True}, # opening in async mode is done in the properties
)
)
# optional: ensure that the PicoTech DLLs are available on PATH
os.environ['PATH'] += os.pathsep + r'C:\Program Files\Pico Technology\SDK\lib'
t0 = time.time()
scope = record.connect()
while True:
now = time.time()
progress = scope.open_unit_progress()
print('Progress: {}%'.format(progress))
if progress == 100 | :
break
time.sleep(0.02)
print('Took {:.2f} seconds to establish a connection to the PicoScope'.format(time.time()-t0))
# flash the LED light for 5 seconds
scope.flash_led(-1)
time.sleep(5)
|
arriqaaq/hackerrank | Algo- Warmup/cavity-rank.py | Python | apache-2.0 | 508 | 0.037402 | def cavity(l,n):
for i in xrange(1,n-1):
for j in xrange(1,n-1):
if l[i-1][j]!='X' and l[i][j-1]!='X' and l[i+1][j]!='X' and l[i][j+1]!='X' and l[i][j]>l[i-1][j] and l[i][j]>l[i+1][j] and l[i][j]>l[i][j-1] and l[i][j]>l[i][j+1]:
l[i][j]='X'
if __name__ == '__main__':
n = input()
p = []
for _ in xrange(n):
| line = list(raw_input())
p.append(line)
|
cavity(p, n)
for line in p:
print ''.join(line)
|
with-git/tensorflow | tensorflow/python/kernel_tests/variables_test.py | Python | apache-2.0 | 25,860 | 0.014811 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase):
def testInitialization(self):
with self.test_session():
var0 = variables.Variable(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.Variable(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
var0.eval()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
var1.eval()
variables.global_variables_initializer().run()
self.assertAllClose(0.0, var0.eval())
self.assertAllClose | (1.1, var1.eval())
def testInitializationOrder(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([ | 3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
variables.global_variables_initializer().run()
self.assertAllClose(rnd.eval(), dep.eval())
self.assertAllClose(rnd.eval() + dep.eval() + 2.0, depdep.eval())
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
def testAssignments(self):
with self.test_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
variables.global_variables_initializer().run()
self.assertAllClose(0.0, var.eval())
self.assertAllClose(1.0, plus_one.eval())
self.assertAllClose(1.0, var.eval())
self.assertAllClose(-1.0, minus_one.eval())
self.assertAllClose(-1.0, var.eval())
self.assertAllClose(4.0, four.eval())
self.assertAllClose(4.0, var.eval())
def testResourceAssignments(self):
    """Same as testAssignments, but for ResourceVariable."""
    with self.test_session(use_gpu=True):
        var = resource_variable_ops.ResourceVariable(0.0)
        plus_one = var.assign_add(1.0)
        minus_one = var.assign_sub(2.0)
        four = var.assign(4.0)
        variables.global_variables_initializer().run()
        self.assertAllClose(0.0, var.eval())
        # Evaluating each assignment op mutates the resource variable in turn.
        plus_one.eval()
        self.assertAllClose(1.0, var.eval())
        minus_one.eval()
        self.assertAllClose(-1.0, var.eval())
        four.eval()
        self.assertAllClose(4.0, var.eval())
def testZeroSizeStringAssign(self):
    """Assigning a zero-length string variable back to itself yields []."""
    with self.test_session() as sess:
        array = variables.Variable(
            initial_value=array_ops.zeros((0,), dtype=dtypes.string),
            name="foo",
            trainable=False,
            collections=[ops.GraphKeys.LOCAL_VARIABLES])
        sess.run(variables.local_variables_initializer())
        old_value = array.value()
        copy_op = array.assign(old_value)
        self.assertEqual([], list(sess.run(copy_op)))
def _countUpToTest(self, dtype):
    """Shared body for count_up_to tests: counts 0..3, then raises at the limit."""
    with self.test_session():
        zero = constant_op.constant(0, dtype=dtype)
        var = variables.Variable(zero)
        count_up_to = var.count_up_to(3)
        variables.global_variables_initializer().run()
        self.assertEqual(0, var.eval())
        # Each eval of count_up_to returns the pre-increment value.
        self.assertEqual(0, count_up_to.eval())
        self.assertEqual(1, var.eval())
        self.assertEqual(1, count_up_to.eval())
        self.assertEqual(2, var.eval())
        self.assertEqual(2, count_up_to.eval())
        self.assertEqual(3, var.eval())
        with self.assertRaisesOpError("Reached limit of 3"):
            count_up_to.eval()
        self.assertEqual(3, var.eval())
        with self.assertRaisesOpError("Reached limit of 3"):
            count_up_to.eval()
        self.assertEqual(3, var.eval())
def testCountUpToInt32(self):
    """count_up_to with an int32 counter."""
    self._countUpToTest(dtypes.int32)
def testCountUpToInt64(self):
    """count_up_to with an int64 counter."""
    self._countUpToTest(dtypes.int64)
def testControlDepsNone(self):
    """Variables ignore an enclosing control_dependencies scope; plain ops don't."""
    with self.test_session():
        c = constant_op.constant(1.0)
        with ops.control_dependencies([c]):
            # d get the control dep.
            d = constant_op.constant(2.0)
            # variables do not.
            var_x = variables.Variable(2.0)
        self.assertEqual([c.op], d.op.control_inputs)
        self.assertEqual([], var_x.initializer.control_inputs)
        self.assertEqual([], var_x.value().op.control_inputs)
        self.assertEqual([], var_x._ref().op.control_inputs)  # pylint: disable=protected-access
def testControlFlow(self):
with self.test_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
s |
glennyonemitsu/funkybomb | funkybomb/node.py | Python | apache-2.0 | 5,062 | 0 | from copy import deepcopy
from funkybomb.exceptions import ChildNodeError
class Node:
    """The building block of tree-based templating.

    Children are stored in ``_children_``.  Only underscore-wrapped names
    (``_x_``) are real attributes; ``__setattr__`` silently drops anything else.
    """

    def __init__(self):
        """Initialize the Node with an empty child list."""
        self._children_ = []

    def __getattr__(self, key):
        # Fall back to the instance dict; otherwise defer upward, which
        # ultimately raises AttributeError for unknown names.
        if key in self.__dict__:
            return self.__dict__[key]
        return super().__getattr__(key)

    def __setattr__(self, key, value):
        # Only underscore-wrapped names (e.g. _children_) are stored; any
        # other assignment is intentionally ignored.
        if key.startswith('_') and key.endswith('_'):
            self.__dict__[key] = value
            return

    def __repr__(self):
        """String representation of the Node."""
        return '<BaseNode>'

    def __add__(self, *nodes):
        """Return a deep copy of self with *nodes* appended (self unchanged)."""
        cn = deepcopy(self)
        # Fixed: stray " | " dataset-separator artifact had split "self" below.
        cn._append_(*nodes)
        return cn

    def __iadd__(self, *nodes):
        """Append nodes in place via the += notation; a tuple is unpacked."""
        if len(nodes) == 1 and type(nodes[0]) == tuple:
            nodes = nodes[0]
        self._append_(*nodes)
        return self

    def _wash_nodes_(self, *nodes):
        # Hook for subclasses to normalize children; the base passes through.
        for node in nodes:
            yield node

    def _append_(self, *nodes):
        # A single list argument is treated as the list of children itself.
        if len(nodes) == 1 and type(nodes[0]) is list:
            nodes = nodes[0]
        self._children_.extend(list(self._wash_nodes_(*nodes)))
class Renderable(Node):
    """A Node that can render itself as an HTML opener/closer tag pair."""

    def __repr__(self):
        return '<RenderableNode>'

    def __init__(self, **attrs):
        super().__init__()
        # Fixed: stray " | " dataset-separator artifact preceded this line.
        self._tag_ = None
        self._attrs_ = attrs

    def __getattr__(self, key):
        # required for a bunch of magic methods quirks like with deepcopy()
        if key.startswith('__') and key.endswith('__'):
            return super().__getattr__(key)
        if hide_attribute(key):
            return None
        if key == '_opener_':
            return self._make_opener_()
        if key == '_closer_':
            return self._make_closer_()
        # Unknown attribute access creates and appends a child Tag of that name.
        n = Tag(key)
        self._append_(n)
        return n

    def __call__(self, *args, **attrs):
        """Append children and merge attributes; returns self for chaining."""
        self._append_(*args)
        self._attrs_.update(attrs)
        return self

    def _make_closer_(self):
        # Void elements and tagless nodes render no closing tag.
        if self._tag_ is None or self._void_:
            return ''
        return '</{tag}>'.format(tag=self._tag_)

    def _make_opener_(self):
        if self._tag_ is None:
            return ''
        if not self._attrs_:
            return '<{tag}>'.format(tag=self._tag_)
        attrs = build_attrs(self._attrs_)
        return '<{tag} {attrs}>'.format(tag=self._tag_, attrs=attrs)

    def _wash_nodes_(self, *nodes):
        # Text-like children (str/int/float) are wrapped in Text nodes.
        text_types = {str, int, float}
        for node in nodes:
            if type(node) in text_types:
                yield Text(node)
            else:
                yield node
class Template(Renderable):
    """A (possibly anonymous) named template root node."""

    def __init__(self, name=None):
        super().__init__()
        self._name_ = name

    def __repr__(self):
        return ('<TemplateNode[{name}]>'.format(name=self._name_)
                if self._name_ else '<AnonymousTemplateNode>')
class Tag(Renderable):
    """An HTML element node; void and raw-text elements get special child rules."""

    # html 5 tag categories according to
    # https://www.w3.org/TR/html5/syntax.html#void-elements
    _void_tags_ = {
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
        'link', 'meta', 'param', 'source', 'track', 'wbr'
    }
    _raw_text_tags_ = {'script', 'style'}

    def __init__(self, tag=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._tag_ = tag
        self._void_ = tag in self._void_tags_
        self._raw_text_ = tag in self._raw_text_tags_

    def __repr__(self):
        return '<TagNode[{tag}]>'.format(tag=self._tag_)

    def _wash_nodes_(self, *nodes):
        # Void elements may not have children at all.
        if self._void_ and nodes:
            raise ChildNodeError()
        if self._raw_text_:
            # Raw-text elements accept only text-like children.
            convertible = {str, int, float}
            for child in nodes:
                kind = type(child)
                if kind in convertible:
                    yield Text(child)
                elif kind is Text:
                    yield child
                else:
                    raise ChildNodeError()
        else:
            yield from super()._wash_nodes_(*nodes)
class Text(Renderable):
    """Leaf node holding literal text; renders as its content."""

    def __init__(self, content=''):
        super().__init__()
        text = str(content)
        self._content_ = text
        self._opener_ = text

    def __repr__(self):
        return '<TextNode["{text}"]>'.format(text=self._content_)
def build_attrs(attrs):
    """Render an attribute dict as space-joined 'key="value"' pairs.

    The Python-safe key '_class' is emitted as the HTML attribute 'class'.
    """
    rendered = []
    for key, value in attrs.items():
        name = 'class' if key == '_class' else key
        rendered.append('{key}="{value}"'.format(key=name, value=value))
    return ' '.join(rendered)
def hide_attribute(key):
    """Return True for IPython display-protocol probe names that must stay hidden."""
    if not is_ipython():
        return False
    return key.startswith(('_ipython_', '_repr_'))
def is_ipython():
    """Detect whether we are running under IPython (__IPYTHON__ builtin)."""
    try:
        __IPYTHON__  # noqa: F821 - injected into builtins by IPython
    except NameError:
        return False
    return True
|
mick-d/nipype_source | tools/checkspecs.py | Python | bsd-3-clause | 17,253 | 0.003362 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Attempt to check each interface in nipype
"""
# Stdlib imports
import inspect
import os
import re
import sys
import tempfile
import warnings
from nipype.interfaces.base import BaseInterface
# Functions and classes
class InterfaceChecker(object):
"""Class for checking all interface specifications
"""
def __init__(self,
             package_name,
             package_skip_patterns=None,
             module_skip_patterns=None,
             class_skip_patterns=None
             ):
    r'''Initialize package for parsing

    Parameters
    ----------
    package_name : string
        Name of the top-level package.  *package_name* must be the
        name of an importable package
    package_skip_patterns : None or sequence of {strings, regexps}
        Sequence of strings giving URIs of packages to be excluded
        Operates on the package path, starting at (including) the
        first dot in the package path, after *package_name* - so,
        if *package_name* is ``sphinx``, then ``sphinx.util`` will
        result in ``.util`` being passed for searching by these
        regexps.  If is None, gives default.  Default is:
        ['\.tests$']
    module_skip_patterns : None or sequence
        Sequence of strings giving URIs of modules to be excluded
        Operates on the module name including preceding URI path,
        back to the first dot after *package_name*.  For example
        ``sphinx.util.console`` results in the string to search of
        ``.util.console``
        If is None, gives default.  Default is:
        ['\.setup$', '\._']
    class_skip_patterns : None or sequence
        Sequence of strings giving classes to be excluded
        Default is: None
    '''
    if package_skip_patterns is None:
        package_skip_patterns = ['\\.tests$']
    if module_skip_patterns is None:
        module_skip_patterns = ['\\.setup$', '\\._']
    # An explicitly-passed (truthy) sequence wins; otherwise no class skips.
    if class_skip_patterns:
        self.class_skip_patterns = class_skip_patterns
    else:
        self.class_skip_patterns = []
    # NOTE: assigning package_name goes through the property setter, which
    # imports the package and records its root path.
    self.package_name = package_name
    self.package_skip_patterns = package_skip_patterns
    self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
    """Property getter for the current package name."""
    return self._package_name
def set_package_name(self, package_name):
    """Set package_name; imports the package and records its root path."""
    # It's also possible to imagine caching the module parsing here
    self._package_name = package_name
    self.root_module = __import__(package_name)
    self.root_path = self.root_module.__path__[0]
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
"""Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
"""
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([],[])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f, uri)
f.close()
return functions, classes
def _parse_lines(self, linesource, module):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_') and \
self._survives_exclude('.'.join((module, name)),
'class'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def test_specs(self, uri):
"""Check input and output specs in an uri
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
"""
# get the names of all classes and functions
_, classes = self._parse_module(uri)
if not classes:
#print 'WARNING: Empty -',uri # dbg
return None
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
allowed_keys = ['desc', 'genfile', 'xor', 'requires', 'desc',
'nohash', 'argstr', 'position', 'mandatory',
'copyfile', 'usedefault', 'sep', 'hash_files',
'deprecated', 'new_name', 'min_ver', 'max_ver',
'name_source', 'name_template', 'keep_extension',
'units', 'output_name']
in_built = ['type', 'copy', 'parent', 'instance_handler',
'comparison_mode', 'array', 'default', 'editor']
bad_specs = []
for c in classes:
__import__(uri)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classinst = sys.modules[uri].__dict__[c]
except Exception as inst:
continue
if not issubclass(classinst, BaseInterface):
continue
testdir = os.path.join(*(uri.split('.')[:-1] + ['tests']))
if not os.path.exists(testdir):
os.makedirs(testdir)
nonautotest = os.path.join(testdir, 'test_%s.py' % c)
testfile = os.path.join(testdir, 'test_auto_%s.py' % c)
if os.path.exists(testfile):
os.unlink(testfile)
if not os.path.exists(nonautotest):
with open(testfile, 'wt') as fp:
cmd = ['# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT',
'from nipype.testing import assert_equal',
'from %s import %s' % (uri, c),
'',
'def test_%s_inputs():' % c]
input_fields = ''
for traitname, trait in sorted(classinst.input_spec().traits(transient=None).items()):
input_fields += '%s=dict(' % traitname
|
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/daclips.py | Python | gpl-2.0 | 3,494 | 0.005724 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import urllib2, os, re
from urlresolver import common
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class DaclipsResolver(Plugin, UrlResolver, PluginSettings):
    """urlresolver plugin for videos hosted on daclips.in / daclips.com.

    NOTE: this module uses Python 2 syntax (urllib2, `except X, e:`).
    """
    implements = [UrlResolver, PluginSettings]
    name = "daclips"

    def __init__(self):
        # Plugin priority falls back to 100 when the setting is unset/empty.
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()
        #e.g. http://daclips.com/vb80o1esx2eb
        self.pattern = 'http://((?:www.)?daclips.(?:in|com))/([0-9a-zA-Z]+)'

    def get_media_url(self, host, media_id):
        """Resolve the final stream URL for a media id, or an unresolvable code."""
        web_url = self.get_url(host, media_id)
        """ Human Verification """
        try:
            resp = self.net.http_GET(web_url)
            html = resp.content
            # A 404 title means the file was deleted on the host side.
            r = re.findall(r'<span class="t" id="head_title">404 - File Not Found</span>',html)
            if r:
                raise Exception ('File Not Found or removed')
            post_url = resp.get_url()
            form_values = {}
            # Re-submit all hidden form fields to pass the verification step.
            for i in re.finditer('<input type="hidden" name="(.+?)" value="(.+?)">', html):
                form_values[i.group(1)] = i.group(2)
            html = self.net.http_POST(post_url, form_data=form_values).content
            r = re.search('file: "http(.+?)"', html)
            if r:
                return "http" + r.group(1)
            else:
                raise Exception ('Unable to resolve Daclips link')
        except urllib2.URLError, e:
            common.addon.log_error('daclips: got http error %d fetching %s' %
                                   (e.code, web_url))
            common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
            return self.unresolvable(code=3, msg=e)
        except Exception, e:
            common.addon.log_error('**** Daclips Error occured: %s' % e)
            common.addon.show_small_popup(title='[B][COLOR white]DACLIPS[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
            return self.unresolvable(code=0, msg=e)

    def get_url(self, host, media_id):
        """Build the canonical page URL for a media id."""
        #return 'http://(daclips|daclips).(in|com)/%s' % (media_id)
        return 'http://daclips.in/%s' % (media_id)

    def get_host_and_id(self, url):
        """Split url into (host, media_id), or False when it doesn't match."""
        r = re.search(self.pattern, url)
        if r:
            return r.groups()
        else:
            return False

    def valid_url(self, url, host):
        """True when this resolver is enabled and the url/host matches."""
        if self.get_setting('enabled') == 'false': return False
        return re.match(self.pattern, url) or self.name in host
|
Pinecast/pinecast | assets/admin.py | Python | apache-2.0 | 1,531 | 0.003919 | from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Asset
from dashboard.models import AssetImportRequest
class AssetImportRequestInline(admin.TabularInline):
    """Read-only tabular inline listing import requests tied to an Asset."""
    model = AssetImportRequest
    fk_name = 'asset'
    readonly_fields = ('resolved', 'failed', )
    extra = 0
    show_change_link = True
    can_delete = False

    def get_fields(self, request, obj=None):
        # Display exactly the read-only fields so the inline is view-only.
        return self.get_readonly_fields(request, obj)
class AssetAdmin(admin.ModelAdmin):
    """Admin for Asset with link/download URL and supersession helper columns."""
    search_fields = ('platform_identifier', 'uuid')
    list_display = ('uuid', 'internal_type', 'platform_identifier', 'content_type')
    list_filter = ('platform', 'internal_type', 'source_environment')
    raw_id_fields = ('owner', 'supersedes')
    readonly_fields = ('get_url', 'is_superseded', 'superseded_by')
    inlines = (AssetImportRequestInline, )

    def get_url(self, instance):
        # NOTE(review): instance.get_url() is interpolated unescaped into the
        # mark_safe() HTML -- assumes it returns a safe URL; confirm upstream.
        return mark_safe('<a href="{url}">{url}</a> or <a href="{url}" download>download</a>'.format(url=instance.get_url()))
    # Fixed: stray " | " dataset-separator artifact had split this identifier.
    get_url.short_description = 'URL'

    def is_superseded(self, instance):
        return 'Yes' if instance.is_superseded() else 'No'
    is_superseded.short_description = 'Is superseded'

    def superseded_by(self, instance):
        # Broad except kept deliberately: any lookup failure renders 'Not found'.
        # (Removed an unreachable `else: return None` after the try's return.)
        try:
            return Asset.objects.get(supersedes=instance.uuid)
        except Exception:
            return 'Not found'
    superseded_by.short_description = 'Superseded by'
admin.site.register(Asset, AssetAdmin) | |
wlach/treeherder | treeherder/log_parser/utils.py | Python | mpl-2.0 | 4,995 | 0.000801 | import logging
import urllib2
import simplejson as json
from django.conf import settings
from treeherder import celery_app
from treeherder.client import (TreeherderArtifactCollection,
TreeherderClient)
from treeherder.credentials.models import Credentials
from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection
from treeherder.log_parser.artifactbuilders import MozlogArtifactBuilder
from treeherder.model.error_summary import get_error_summary_artifacts
logger = logging.getLogger(__name__)
def is_parsed(job_log_url):
    """True when the job log's parse_status is 'parsed' (missing means pending)."""
    status = job_log_url.get("parse_status", "pending")
    return status == "parsed"
def extract_text_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the raw text log."""
    # parse a log given its url
    collection = ArtifactBuilderCollection(log_url)
    collection.parse()
    artifacts = []
    for name, blob in collection.artifacts.items():
        if name == 'Job Info':
            for detail in blob['job_details']:
                uploaded_summary = (
                    detail.get('title') == 'artifact uploaded'
                    and detail['value'].endswith('_errorsummary.log'))
                if uploaded_summary:
                    # using .send_task to avoid an import loop.
                    celery_app.send_task('store-error-summary',
                                         [project, detail['url'], job_guid],
                                         routing_key='store_error_summary')
        artifacts.append({
            "job_guid": job_guid,
            "name": name,
            "type": 'json',
            "blob": json.dumps(blob)
        })
    artifacts.extend(get_error_summary_artifacts(artifacts))
    return artifacts
def extract_json_log_artifacts(project, log_url, job_guid):
    """Generate a summary artifact for the mozlog json log."""
    logger.debug("Parsing JSON log at url: {0}".format(log_url))
    builder = MozlogArtifactBuilder(log_url)
    builder.parse_log()
    artifact = {
        "job_guid": job_guid,
        "name": builder.name,
        "type": 'json',
        "blob": json.dumps(builder.get_artifact())
    }
    return [artifact]
def post_log_artifacts(project,
                       job_guid,
                       job_log_url,
                       retry_task,
                       extract_artifacts_cb):
    """Post a list of artifacts to a job.

    Downloads/parses the log via extract_artifacts_cb, uploads the resulting
    artifacts, and updates the log's parse status; retries recoverable errors.
    """
    def _retry(e):
        # Initially retry after 1 minute, then for each subsequent retry
        # lengthen the retry time by another minute.
        retry_task.retry(exc=e, countdown=(1 + retry_task.request.retries) * 60)
        # .retry() raises a RetryTaskError exception,
        # so nothing after this function will be executed

    log_description = "%s %s (%s)" % (project, job_guid, job_log_url['url'])
    # Fixed: trailing " |" dataset-separator artifact on this line.
    logger.debug("Downloading/parsing log for %s", log_description)

    credentials = Credentials.objects.get(client_id=settings.ETL_CLIENT_ID)
    client = TreeherderClient(
        protocol=settings.TREEHERDER_REQUEST_PROTOCOL,
        host=settings.TREEHERDER_REQUEST_HOST,
        client_id=credentials.client_id,
        secret=str(credentials.secret),
    )
    try:
        # Fixed: " | " artifact had split the 'url' key below.
        artifact_list = extract_artifacts_cb(project, job_log_url['url'],
                                             job_guid)
    except Exception as e:
        client.update_parse_status(project, job_log_url['id'], 'failed')
        # unrecoverable http error (doesn't exist or permission denied)
        # (apparently this can happen somewhat often with taskcluster if
        # the job fails, so just warn about it -- see
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1154248)
        if isinstance(e, urllib2.HTTPError) and e.code in (403, 404):
            logger.warning("Unable to retrieve log for %s: %s",
                           log_description, e)
            return
        # possibly recoverable http error (e.g. problems on our end)
        elif isinstance(e, urllib2.URLError):
            logger.error("Failed to download log for %s: %s",
                         log_description, e)
            _retry(e)
        # parse error or other unrecoverable error
        else:
            logger.error("Failed to download/parse log for %s: %s",
                         log_description, e)
            # re-raise exception if we're not retrying, so new relic sees the
            # error
            raise

    # store the artifacts generated
    tac = TreeherderArtifactCollection()
    for artifact in artifact_list:
        ta = tac.get_artifact(artifact)
        tac.add(ta)
    try:
        client.post_collection(project, tac)
        client.update_parse_status(project, job_log_url['id'], 'parsed')
        logger.debug("Finished posting artifact for %s %s", project, job_guid)
    except Exception as e:
        logger.error("Failed to upload parsed artifact for %s: %s", log_description, e)
        _retry(e)
|
scienceopen/CVutils | morecvutils/getaviprop.py | Python | mit | 1,520 | 0 | #!/usr/bin/env python
"""
gets basic info about AVI file using OpenCV
input: filename or cv2.Capture
"""
from pathlib import Path
from struct import pack
from typing import Dict, Any
import cv2
def getaviprop(fn: Path) -> Dict[str, Any]:
    """Return basic video properties: frame count, size, fps and codec.

    fn may be a filename (str/Path) or an already-open cv2.VideoCapture;
    captures we open ourselves are released before returning.
    """
    if isinstance(fn, (str, Path)):  # assuming filename
        fn = Path(fn).expanduser()
        if not fn.is_file():
            raise FileNotFoundError(fn)
        # Fixed: stray " | " dataset-separator artifact preceded this call.
        v = cv2.VideoCapture(str(fn))
        if v is None:
            raise OSError(f'could not read {fn}')
    else:  # assuming cv2.VideoCapture object
        v = fn
    if not v.isOpened():
        raise OSError(f'cannot read {fn} probable codec issue')
    vidparam = {
        'nframe': int(v.get(cv2.CAP_PROP_FRAME_COUNT)),
        'xy_pixel': (
            int(v.get(cv2.CAP_PROP_FRAME_WIDTH)),
            # Fixed: " | " artifact had split CAP_PROP_FRAME_HEIGHT.
            int(v.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        ),
        'fps': v.get(cv2.CAP_PROP_FPS),
        'codec': fourccint2ascii(int(v.get(cv2.CAP_PROP_FOURCC))),
    }
    if isinstance(fn, Path):
        # Only release captures opened here; the caller keeps ownership of theirs.
        v.release()
    return vidparam
def fourccint2ascii(fourcc_int: int) -> str:
    """Decode a FourCC stored as a little-endian 32-bit integer into ASCII."""
    assert isinstance(fourcc_int, int)
    raw = pack('<I', fourcc_int)
    return raw.decode('ascii')
if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser(description='get parameters of AVI file')
    parser.add_argument('avifn', help='avi filename')
    args = parser.parse_args()
    # Print the property dict for the requested file.
    print(getaviprop(args.avifn))
|
mmunko/skuskovy-system | manage.py | Python | gpl-3.0 | 259 | 0 | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
    # Fixed: dataset-separator artifacts had split os.environ.setdefault and
    # injected a stray '|' line.  Standard Django manage.py entry point.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "skuskovy_system.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
neva-nevan/ConfigPy | ConfigPy-Portable/ConfigPy/cgi-bin/restore.py | Python | mit | 516 | 0 | import os
import shutil
from glob import glob
print 'Content-type:text/html\r\n\r\n'
print '<html>'
| found_pages = glob('archive/*.py')
if found_pages:
path = "/cgi-bin/archive/"
moveto = "/cgi-bin/pages/"
files = os.listdir(path)
files.sort()
for f in files:
src = path+f
dst = moveto+f
shutil.move(src, dst)
print 'All pages restored'
print '<meta http-equiv="refresh" content="1";>'
if not found_pages:
print 'Nothing to restore'
p | rint '</html>'
# EOF
|
trilliumtransit/oba_rvtd_monitor | oba_rvtd_monitor/problems.py | Python | apache-2.0 | 216 | 0 | from transitfeed im | port TYPE_ERROR, TYPE_WARNING, TYPE_NOTICE
from oba_rvtd_monitor.feedvalidator import LimitPerTypeProblemAccumulator
class MonitoringProblemAccumulator(LimitPerTypeProblemAccumulat | or):
pass
|
shoopio/shoop | shuup_workbench/settings/__init__.py | Python | agpl-3.0 | 1,411 | 0.000709 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.core.exceptions import ImproperlyConfigured
from shuup.utils.setup import Setup
from . import base_settings
def configure(setup):
    """Apply base settings, then overlay optional local settings.

    The local settings file (LOCAL_SETTINGS_FILE env var, or for backward
    compatibility a sibling local_settings.py) must define configure(setup).
    """
    base_settings.configure(setup)
    local_settings_file = os.getenv("LOCAL_SETTINGS_FILE")
    # Backward compatibility: Find from current directory, if
    # LOCAL_SETTINGS_FILE environment variables is unset
    if local_settings_file is None:
        cand = os.path.join(os.path.dirname(__file__), "local_settings.py")
        if os.path.exists(cand):
            local_settings_file = cand
    # Load local settings from file
    if local_settings_file:
        local_settings_ns = {
            "__file__": local_settings_file,
        }
        with open(local_settings_file, "rb") as fp:
            # Fixed: " | " artifact had corrupted the compile mode string.
            compiled = compile(fp.read(), local_settings_file, "exec")
            exec(compiled, local_settings_ns)
        if "configure" not in local_settings_ns:
            raise ImproperlyConfigured("Error! No configure in local_settings.")
        local_configure = local_settings_ns["configure"]
        # Fixed: " | " artifact had split the 'setup' argument.
        local_configure(setup)
    return setup
globals().update(Setup.configure(configure))
|
scls19fr/python-lms-tools | tests/test_gift.py | Python | mit | 15,672 | 0.003563 | from lms_tools.aiken import AikenQuiz, AikenQuestion
from lms_tools.gift import GiftQuiz, GiftQuestion, GiftDistractor
def test_gift_question_to_string():
    """A binary GiftQuestion (one correct distractor) renders canonical GIFT text."""
    q = GiftQuestion("L'appareil servant à mesurer la vitesse du vent au sol s'appelle :", name="0001", comment="question: 1 name: 0001")
    q.append_distractor(GiftDistractor("une girouette.", 0))
    q.append_distractor(GiftDistractor("une rose des vents.", 0))
    q.append_distractor(GiftDistractor("un baromètre.", 0))
    q.append_distractor(GiftDistractor("un anémomètre.", 1))
    assert q.is_binary()
    assert not q.is_correct_answer(0)
    assert not q.is_correct_answer(1)
    assert not q.is_correct_answer(2)
    assert q.is_correct_answer(3)
    # Note: ':' in the stem must be escaped as '\:' in GIFT output.
    expected_gift_text = """// question: 1 name: 0001
::0001::L'appareil servant à mesurer la vitesse du vent au sol s'appelle \\:{
\t~une girouette.
\t~une rose des vents.
\t~un baromètre.
\t=un anémomètre.
}"""
    assert q.to_string() == expected_gift_text
def test_gift_question_set_correct_answer_binary():
    """set_correct_answer on plain-text distractors marks exactly one as right."""
    q = GiftQuestion("L'appareil servant à mesurer la vitesse du vent au sol s'appelle :", name="0001", comment="question: 1 name: 0001")
    q.append_distractor("une girouette.")
    # Fixed: " | " dataset-separator artifacts had corrupted the next two
    # lines (inside the string literal and inside the method name).
    q.append_distractor("une rose des vents.")
    q.append_distractor("un baromètre.")
    q.append_distractor("un anémomètre.")
    q.set_correct_answer(3)
    assert not q.is_correct_answer(0)
    assert not q.is_correct_answer(1)
    assert not q.is_correct_answer(2)
    assert q.is_correct_answer(3)
def test_gift_question_to_string_with_escaped_char():
    """'=' inside distractor text must be escaped in the rendered GIFT output."""
    q = GiftQuestion("Identifier les éléments 1, 2 et 3 de la structure :", name="0001", comment="question: 1 name: 0001")
    q.append_distractor("1 = nervure, 2 = couple, 3 = lisse.")
    q.append_distractor("1 = longeron, 2 = nervure, 3 = entretoise.")
    q.append_distractor("1 = poutre, 2 = traverse, 3 = semelle.")
    q.append_distractor("1 = couple, 2 = entretoise, 3 = traverse.")
    q.set_correct_answer(1)
    expected_gift_text = """// question: 1 name: 0001
::0001::Identifier les éléments 1, 2 et 3 de la structure \\:{
\t~1 \\= nervure, 2 \\= couple, 3 \\= lisse.
\t=1 \\= longeron, 2 \\= nervure, 3 \\= entretoise.
\t~1 \\= poutre, 2 \\= traverse, 3 \\= semelle.
\t~1 \\= couple, 2 \\= entretoise, 3 \\= traverse.
}"""
    assert q.to_string() == expected_gift_text
def test_gift_question_not_is_binary():
    """Multiple correct distractors render fractional (percentage) weights."""
    q = GiftQuestion("Question", name="0001", comment="question: 1 name: 0001")
    q.append_distractor(GiftDistractor("Bonne réponse", 1))
    q.append_distractor(GiftDistractor("Bonne réponse", 1))
    q.append_distractor(GiftDistractor("Bonne réponse", 1))
    q.append_distractor(GiftDistractor("Mauvaise réponse", 0))
    q.append_distractor(GiftDistractor("Mauvaise réponse", 0))
    assert not q.is_binary()
    assert not q.is_correct_answer(0)
    assert q.is_partially_correct_answer(0)
    assert q.is_partially_correct_answer(1)
    assert q.is_partially_correct_answer(2)
    assert not q.is_partially_correct_answer(3)
    assert not q.is_partially_correct_answer(4)
    # Three correct answers share 100% as 33.33333% each; wrong ones get 0%.
    expected_gift_text = """// question: 1 name: 0001
::0001::Question{
\t~33.33333%Bonne réponse
\t~33.33333%Bonne réponse
\t~33.33333%Bonne réponse
\t~0%Mauvaise réponse
\t~0%Mauvaise réponse
}"""
    assert q.to_string() == expected_gift_text
def test_gift_question_set_correct_answer_not_binary():
    """Multiple set_correct_answer calls mark each index as partially correct."""
    q = GiftQuestion("Question", name="0001", comment="question: 1 name: 0001")
    q.append_distractor("Bonne réponse")
    q.append_distractor("Bonne réponse")
    q.append_distractor("Bonne réponse")
    q.append_distractor("Mauvaise réponse")
    q.append_distractor("Mauvaise réponse")
    q.set_correct_answer(0)
    q.set_correct_answer(1)
    q.set_correct_answer(2)
    assert q.is_partially_correct_answer(0)
    assert q.is_partially_correct_answer(1)
    assert q.is_partially_correct_answer(2)
    assert q.is_incorrect_answer(3)
    assert q.is_incorrect_answer(4)
def test_gift_quiz():
    """A two-question quiz renders with the supplied header and footer."""
    quiz = GiftQuiz()
    q = GiftQuestion("L'appareil servant à mesurer la vitesse du vent au sol s'appelle :", name="0001", comment="question: 1 name: 0001")
    q.append_distractor(GiftDistractor("une girouette.", 0))
    q.append_distractor(GiftDistractor("une rose des vents.", 0))
    q.append_distractor(GiftDistractor("un baromètre.", 0))
    q.append_distractor(GiftDistractor("un anémomètre.", 1))
    quiz.append(q)
    q = GiftQuestion("L'unité de pression utilisée dans le système international et en aéronautique est :", name="0002", comment="question: 2 name: 0002")
    q.append_distractor(GiftDistractor("le pascal.", 1))
    q.append_distractor(GiftDistractor("le newton.", 0))
    q.append_distractor(GiftDistractor("le joule.", 0))
    q.append_distractor(GiftDistractor("le millimètre de mercure.", 0))
    quiz.append(q)
    assert len(quiz) == 2
    # Moodle-style category header and end-of-quiz footer.
    header = """// question: 0 name: Switch category to $module$/Défaut pour BIA 2016 Météorologie et aérologie
$CATEGORY: $module$/Défaut pour BIA 2016 Météorologie et aérologie
"""
    footer = """
// end of quiz"""
    expected_gift_text = """// question: 0 name: Switch category to $module$/Défaut pour BIA 2016 Météorologie et aérologie
$CATEGORY: $module$/Défaut pour BIA 2016 Météorologie et aérologie
// question: 1 name: 0001
::0001::L'appareil servant à mesurer la vitesse du vent au sol s'appelle \\:{
\t~une girouette.
\t~une rose des vents.
\t~un baromètre.
\t=un anémomètre.
}
// question: 2 name: 0002
::0002::L'unité de pression utilisée dans le système international et en aéronautique est \\:{
\t=le pascal.
\t~le newton.
\t~le joule.
\t~le millimètre de mercure.
}
// end of quiz"""
    assert quiz.to_string(header=header, footer=footer) == expected_gift_text
def test_aiken_question_to_gift_question():
    """An AikenQuestion converts to an equivalent binary GiftQuestion."""
    q = AikenQuestion("L'appareil servant à mesurer la vitesse du vent au sol s'appelle :")
    for text in ("une girouette.", "une rose des vents.",
                 "un baromètre.", "un anémomètre."):
        q.append_distractor(text)
    q.set_correct_answer(3)
    converted = GiftQuestion.from_aiken(q)
    assert isinstance(converted, GiftQuestion)
    assert q.stem == converted.stem
    assert len(converted) == len(q)
    assert converted.is_binary()
    assert converted.is_correct_answer(3)
def test_aiken_quiz_to_gift_quiz():
    """An Aiken quiz converted to GIFT renders the expected GIFT text."""
    aiken_text = """L'appareil servant à mesurer la vitesse du vent au sol s'appelle :
A) une girouette.
B) une rose des vents.
C) un baromètre.
D) un anémomètre.
ANSWER: D

L'unité de pression utilisée dans le système international et en aéronautique est :
A) le pascal.
B) le newton.
C) le joule.
D) le millimètre de mercure.
ANSWER: A"""
    aiken_quiz = AikenQuiz.parse(aiken_text)
    gift_quiz = GiftQuiz.from_aiken(aiken_quiz)

    # Number the converted questions 0001, 0002, ... as Moodle exports do.
    for idx, question in enumerate(gift_quiz.iter_questions(), start=1):
        question.comment = "question: %d name: %04d" % (idx, idx)
        question.name = "%04d" % idx

    expected_gift_text = """// question: 1 name: 0001
::0001::L'appareil servant à mesurer la vitesse du vent au sol s'appelle \\:{
\t~une girouette.
\t~une rose des vents.
\t~un baromètre.
\t=un anémomètre.
}

// question: 2 name: 0002
::0002::L'unité de pression utilisée dans le système international et en aéronautique est \\:{
\t=le pascal.
\t~le newton.
\t~le joule.
\t~le millimètre de mercure.
}"""
    assert gift_quiz.to_string() == expected_gift_text
    assert len(gift_quiz) == len(aiken_quiz)
def test_gift_quiz_join():
quiz1 = GiftQuiz()
q = GiftQuestion("L'appareil servant à mesurer la vitesse du vent au sol s'appelle :", name="0001", comment="question: 1 name: 0001")
q.append_distractor(GiftDistractor("une girouette.", 0))
q.append_distractor(GiftDistractor("une rose des vents.", 0))
q.append_distractor(GiftDistractor("un baromètre.", 0))
q.append_distractor(GiftDistractor("un anémomètre.", 1))
quiz1.append(q)
q = GiftQuestion("L'unité de pression utilisée dans le système international et en aéronautique est :", name="0002", comment="question: 2 name: 0002")
q.append_distractor(GiftDis |
artzers/MachineLearning | LinearRegression/SimpleLinearReg.py | Python | mit | 588 | 0.032313 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 09:23:01 2017
@author: zhouhang
"""
import numpy as np
from matplotlib import pyplot as plt
X = np.arange(-5.,9.,0.1)
print X
X=np.random.permutation(X)
print X
b=5.
y=0.5 * X ** 2.0 +3. * X + b + np.random.random(X.shape)* 10.
#plt.scatter(X,y)
#plt.show()
#
X_ | = np.mat(X).T
X_ = np.hstack((np.square(X_) , X_))
X_ = np.hstack((X_, np.mat(np.ones(len(X))).T))
A=(X_.T*X_).I*X_.T * np.mat(y).T
y_ = X_ * A
plt.hold(True)
plt.plot(X,y,'r.',fillstyle='none')
plt.plot(X,y_,'bo',fillstyle= | 'none')
plt.show() |
vathpela/anaconda | pyanaconda/core/timer.py | Python | gpl-2.0 | 3,465 | 0.001443 | # Timer class for scheduling methods after some time.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Author(s): Jiri Konecny <jkonecny@redhat.com>
#
from pyanaconda.core.glib import timeout_add, timeout_add_ | seconds, idle_add, source_remove
class Timer(object):
    """Object to schedule functions and methods to the GLib event loop.

    Everything scheduled by Timer is run on the main thread.
    """

    def __init__(self):
        # GLib source id of the last scheduled callback; 0 means "none".
        self._id = 0

    def timeout_sec(self, seconds, callback, *args, **kwargs):
        """Schedule ``callback`` to run after the given number of seconds.

        .. NOTE::
            The callback will be repeatedly called until it returns False
            or ``cancel()`` is called.

        :param seconds: Number of seconds after which the callback will be called.
        :type seconds: int
        :param callback: Callback which will be called.
        :type callback: Function.
        :param args: Arguments passed to the callback.
        :param kwargs: Keyword arguments passed to the callback.
        """
        self._id = timeout_add_seconds(seconds, callback, *args, **kwargs)

    def timeout_msec(self, miliseconds, callback, *args, **kwargs):
        """Schedule ``callback`` to run after the given number of milliseconds.

        .. NOTE::
            The callback will be repeatedly called until it returns False
            or ``cancel()`` is called.

        :param miliseconds: Number of milliseconds after which the callback will be called.
        :type miliseconds: int
        :param callback: Callback which will be called.
        :type callback: Function.
        :param args: Arguments passed to the callback.
        :param kwargs: Keyword arguments passed to the callback.
        """
        self._id = timeout_add(miliseconds, callback, *args, **kwargs)

    def timeout_now(self, callback, *args, **kwargs):
        """Schedule ``callback`` to run when the event loop is empty (idle).

        .. NOTE::
            This method is mainly used to run something in the main thread.

        :param callback: Callback which will be called.
        :type callback: Function.
        :param args: Arguments passed to the callback.
        :param kwargs: Keyword arguments passed to the callback.
        """
        self._id = idle_add(callback, *args, **kwargs)

    def cancel(self):
        """Cancel the scheduled callback.

        This way the schedule_sec and schedule_msec repetition can be canceled.
        """
        source_remove(self._id)
        self._id = 0
|
girisagar46/flask_restipy | api/companies/unittest/update_company_test.py | Python | mit | 558 | 0.008961 | from api.lib.testutils import BaseTestCase
import api.companies.unittest
class TestUpdateCompany(BaseTestCase):
    """Exercise the PUT /companies/<name> endpoint."""

    def test_update_company(self):
        payload = '{"name": "Exponential.io", "city": "Menlo Park"}'
        response = self.app.put(
            '/companies/exponential',
            data=payload,
            headers={'content-type': 'application/json'},
        )
        self.assertEqual(response.status_code, 200)
        # The old city must be replaced by the new one in the response body.
        assert 'Palo Alto' not in response.data
        assert 'Menlo Park' in response.data
# Restored from a garbled dump line ("__mai | n__"): standard script entry.
if __name__ == "__main__":
    api.companies.unittest.main()
hazrpg/calibre | src/calibre/customize/profiles.py | Python | gpl-3.0 | 26,074 | 0.011506 | # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from itertools import izip
from calibre.customize import Plugin as _Plugin
FONT_SIZES = [('xx-small', 1),
('x-small', None),
('small', 2),
('medium', 3),
('large', 4),
('x-large', 5),
('xx-large', 6),
(None, 7)]
class Plugin(_Plugin):
    """Base device profile: font metrics and screen geometry.

    Subclasses override the class attributes below to describe a concrete
    device; __init__ derives per-instance lookup tables from them.
    """

    # Base font size in points; fsizes are the sizes mapped onto the
    # FONT_SIZES name/number pairs (xx-small .. xx-large, 1..7).
    fbase = 12
    fsizes = [5, 7, 9, 12, 13.5, 17, 20, 22, 24]
    # Screen resolution in pixels and density in dots per inch.
    screen_size = (1600, 1200)
    dpi = 100

    def __init__(self, *args, **kwargs):
        _Plugin.__init__(self, *args, **kwargs)
        self.width, self.height = self.screen_size
        # Keep the raw size list in fkey, then rebuild fsizes as
        # (css-name, html-font-number, size-in-points) triples.
        fsizes = list(self.fsizes)
        self.fkey = list(self.fsizes)
        self.fsizes = []
        for (name, num), size in izip(FONT_SIZES, fsizes):
            self.fsizes.append((name, num, float(size)))
        # Lookup tables: CSS size name -> points, HTML font number -> points.
        self.fnames = dict((name, sz) for name, _, sz in self.fsizes if name)
        self.fnums = dict((num, sz) for _, num, sz in self.fsizes if num)
        # Screen dimensions converted from pixels to points (72 pt / inch).
        self.width_pts = self.width * 72./self.dpi
        self.height_pts = self.height * 72./self.dpi
# Input profiles {{{
class InputProfile(Plugin):
    """Default input profile: sane fallbacks when the source device is unknown."""

    author = 'Kovid Goyal'
    supported_platforms = set(['windows', 'osx', 'linux'])
    # Input profiles cannot be disabled by the user.
    can_be_disabled = False
    type = _('Input profile')

    name = 'Default Input Profile'
    short_name = 'default' # Used in the CLI so dont use spaces etc. in it
    description = _('This profile tries to provide sane defaults and is useful '
                    'if you know nothing about the input document.')
class SonyReaderInput(InputProfile):
    """Input profile for the SONY PRS e-reader line (500/505/600/700...)."""

    name = 'Sony Reader'
    short_name = 'sony'
    description = _('This profile is intended for the SONY PRS line. '
            'The 500/505/600/700 etc.')

    # E-ink panel geometry and the device's font size ladder.
    screen_size = (584, 754)
    dpi = 168.451
    fbase = 12
    fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Input(SonyReaderInput):
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS 300.')
dpi = 200
class SonyReader900Input(SonyReaderInput):
author = 'John Schember'
name = 'Sony Reader 900'
| short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (584, 978)
class MSReaderInput(InputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 2 | 2, 26]
class MobipocketInput(InputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Input(InputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Input(HanlinV3Input):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 200
class CybookG3Input(InputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusInput(InputProfile):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class KindleInput(InputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class IlliadInput(InputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Input(InputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Input(InputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookInput(InputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
# All known input profiles, kept sorted case-insensitively by display name.
input_profiles = [InputProfile, SonyReaderInput, SonyReader300Input,
        SonyReader900Input, MSReaderInput, MobipocketInput, HanlinV3Input,
        HanlinV5Input, CybookG3Input, CybookOpusInput, KindleInput, IlliadInput,
        IRexDR1000Input, IRexDR800Input, NookInput]

# A key function gives the same ordering as the old cmp= comparator, is
# computed once per element instead of per comparison, and — unlike the
# cmp parameter — still exists in Python 3.
input_profiles.sort(key=lambda x: x.name.lower())
# }}}
class OutputProfile(Plugin):
author = 'Kovid Goyal'
supported_platforms = set(['windows', 'osx', 'linux'])
can_be_disabled = False
type = _('Output profile')
name = 'Default Output Profile'
short_name = 'default' # Used in the CLI so dont use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful '
'if you want to produce a document intended to be read at a '
'computer or on a range of devices.')
#: The image size for comics
comic_screen_size = (584, 754)
#: If True the MOBI renderer on the device supports MOBI indexing
supports_mobi_indexing = False
#: If True output should be optimized for a touchscreen interface
touchscreen = False
touchscreen_news_css = ''
#: A list of extra (beyond CSS 2.1) modules supported by the device
#: Format is a cssuti |
kevroy314/msl-iposition-pipeline | cogrecon/core/cogrecon_globals.py | Python | gpl-3.0 | 991 | 0 | # File globals
data_coordinates_file_suffix = 'position_data_coordinates.txt'
actual_coordinates_file_suffix = 'actual_coordinates.txt'
category_file_suffix = 'categories.txt'
order_file_suffix = 'order.txt'
# Parameter globals
default_z_value = 1.96
default_pipeline_flags = 3
default_dimensions = 2
# Visualization globals
default_animation_durat | ion = 2
default_animation_ticks = 20
default_visualization_transformed_points_c | olor = 'b'
default_visualization_transformed_points_alpha = 0.5
default_visualization_actual_points_color = 'g'
default_visualization_data_points_color = 'r'
default_visualization_actual_points_size = 50
default_visualization_data_points_size = 50
default_visualization_font_size = 20
default_visualization_accuracies_corrected_alpha = 0.3
default_visualization_accuracies_incorrect_color = 'r'
default_visualization_accuracies_correct_color = 'g'
default_visualization_accuracies_uncorrected_color = 'b'
default_visualization_accuracies_uncorrected_alpha = 0.1
|
bckwltn/SickRage | sickbeard/metadata/mediabrowser.py | Python | gpl-3.0 | 22,239 | 0.002248 | # Author: Nic Wolfe <nic@wolfeden.ca>
# | URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software | Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import os
import re
import sickbeard
import generic
from sickbeard import logger, exceptions, helpers
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
import xml.etree.cElementTree as etree
class MediaBrowserMetadata(generic.GenericMetadata):
"""
Metadata generation class for Media Browser 2.x/3.x - Standard Mode.
The following file structure is used:
show_root/series.xml (show metadata)
show_root/folder.jpg (poster)
show_root/backdrop.jpg (fanart)
show_root/Season ##/folder.jpg (season thumb)
show_root/Season ##/filename.ext (*)
show_root/Season ##/metadata/filename.xml (episode metadata)
show_root/Season ##/metadata/filename.jpg (episode thumb)
"""
    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        """Configure MediaBrowser-style naming on top of the generic provider.

        The boolean flags simply enable/disable each artifact type and are
        forwarded unchanged to GenericMetadata.
        """

        generic.GenericMetadata.__init__(self,
                                         show_metadata,
                                         episode_metadata,
                                         fanart,
                                         poster,
                                         banner,
                                         episode_thumbnails,
                                         season_posters,
                                         season_banners,
                                         season_all_poster,
                                         season_all_banner)

        # Provider identity and the MediaBrowser file-naming conventions.
        self.name = 'MediaBrowser'

        self._ep_nfo_extension = 'xml'
        self._show_metadata_filename = 'series.xml'

        self.fanart_name = "backdrop.jpg"
        self.poster_name = "folder.jpg"

        # web-ui metadata template
        self.eg_show_metadata = "series.xml"
        self.eg_episode_metadata = "Season##\\metadata\\<i>filename</i>.xml"
        self.eg_fanart = "backdrop.jpg"
        self.eg_poster = "folder.jpg"
        self.eg_banner = "banner.jpg"
        self.eg_episode_thumbnails = "Season##\\metadata\\<i>filename</i>.jpg"
        self.eg_season_posters = "Season##\\folder.jpg"
        self.eg_season_banners = "Season##\\banner.jpg"
        self.eg_season_all_poster = "<i>not supported</i>"
        self.eg_season_all_banner = "<i>not supported</i>"
self.eg_season_all_banner = "<i>not supported</i>"
# Override with empty methods for unsupported features
    def retrieveShowMetadata(self, folder):
        """Not supported for MediaBrowser lookups; always return an empty triple."""
        # while show metadata is generated, it is not supported for our lookup
        return (None, None, None)
    def create_season_all_poster(self, show_obj):
        """No-op: MediaBrowser has no 'season all' poster concept."""
        pass
    def create_season_all_banner(self, show_obj):
        """No-op: MediaBrowser has no 'season all' banner concept."""
        pass
def get_episode_file_path(self, ep_obj):
"""
Returns a full show dir/metadata/episode.xml path for MediaBrowser
episode metadata files
ep_obj: a TVEpisode object to get the path for
"""
if ek.ek(os.path.isfile, ep_obj.location):
xml_file_name = helpers.replaceExtension(ek.ek(os.path.basename, ep_obj.location), self._ep_nfo_extension)
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata')
xml_file_path = ek.ek(os.path.join, metadata_dir_name, xml_file_name)
else:
logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG)
return ''
return xml_file_path
def get_episode_thumb_path(self, ep_obj):
"""
Returns a full show dir/metadata/episode.jpg path for MediaBrowser
episode thumbs.
ep_obj: a TVEpisode object to get the path from
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_file_name = helpers.replaceExtension(ek.ek(os.path.basename, ep_obj.location), 'jpg')
metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata')
tbn_file_path = ek.ek(os.path.join, metadata_dir_name, tbn_file_name)
else:
return None
return tbn_file_path
def get_season_poster_path(self, show_obj, season):
"""
Season thumbs for MediaBrowser go in Show Dir/Season X/folder.jpg
If no season folder exists, None is returned
"""
dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if
ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]
season_dir_regex = '^Season\s+(\d+)$'
season_dir = None
for cur_dir in dir_list:
# MediaBrowser 1.x only supports 'Specials'
# MediaBrowser 2.x looks to only support 'Season 0'
# MediaBrowser 3.x looks to mimic KODI/Plex support
if season == 0 and cur_dir == "Specials":
season_dir = cur_dir
break
match = re.match(season_dir_regex, cur_dir, re.I)
if not match:
continue
cur_season = int(match.group(1))
if cur_season == season:
season_dir = cur_dir
break
if not season_dir:
logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
return None
logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG)
return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg')
def get_season_banner_path(self, show_obj, season):
"""
Season thumbs for MediaBrowser go in Show Dir/Season X/banner.jpg
If no season folder exists, None is returned
"""
dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if
ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))]
season_dir_regex = '^Season\s+(\d+)$'
season_dir = None
for cur_dir in dir_list:
# MediaBrowser 1.x only supports 'Specials'
# MediaBrowser 2.x looks to only support 'Season 0'
# MediaBrowser 3.x looks to mimic KODI/Plex support
if season == 0 and cur_dir == "Specials":
season_dir = cur_dir
break
match = re.match(season_dir_regex, cur_dir, re.I)
if not match:
continue
cur_season = int(match.group(1))
if cur_season == season:
season_dir = cur_dir
break
if not season_dir:
logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG)
return None
logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG)
return ek.ek(os.path.join, show_obj.location, season_dir, 'banner.jpg')
def _show_data(self, show_obj):
"""
Creates an elementTree XML structure for a MediaBrowser-style series.xml
returns the resulting data object.
show_obj: a TVShow instance to create the NFO for
"""
indexer_lang = show_obj.lang
# There's gotta be |
hryamzik/ansible | lib/ansible/modules/clustering/k8s/_kubernetes.py | Python | gpl-3.0 | 16,555 | 0.001993 | #!/usr/bin/python
# Copyright: (c) 2015, Google Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future | __ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUM | ENTATION = '''
---
module: kubernetes
version_added: "2.1"
deprecated:
removed_in: "2.9"
why: This module used the oc command line tool, where as M(k8s_raw) goes over the REST API.
alternative: Use M(k8s_raw) instead.
short_description: Manage Kubernetes resources
description:
- This module can manage Kubernetes resources on an existing cluster using
the Kubernetes server API. Users can specify in-line API data, or
specify an existing Kubernetes YAML file.
- Currently, this module
(1) Only supports HTTP Basic Auth
(2) Only supports 'strategic merge' for update, http://goo.gl/fCPYxT
SSL certs are not working, use C(validate_certs=off) to disable.
options:
api_endpoint:
description:
- The IPv4 API endpoint of the Kubernetes cluster.
required: true
aliases: [ endpoint ]
inline_data:
description:
- The Kubernetes YAML data to send to the API I(endpoint). This option is
mutually exclusive with C('file_reference').
required: true
file_reference:
description:
- Specify full path to a Kubernets YAML file to send to API I(endpoint).
This option is mutually exclusive with C('inline_data').
patch_operation:
description:
- Specify patch operation for Kubernetes resource update.
- For details, see the description of PATCH operations at
U(https://github.com/kubernetes/kubernetes/blob/release-1.5/docs/devel/api-conventions.md#patch-operations).
default: Strategic Merge Patch
choices: [ JSON Patch, Merge Patch, Strategic Merge Patch ]
aliases: [ patch_strategy ]
version_added: 2.4
certificate_authority_data:
description:
- Certificate Authority data for Kubernetes server. Should be in either
standard PEM format or base64 encoded PEM data. Note that certificate
verification is broken until ansible supports a version of
'match_hostname' that can match the IP address against the CA data.
state:
description:
- The desired action to take on the Kubernetes data.
required: true
choices: [ absent, present, replace, update ]
default: present
url_password:
description:
- The HTTP Basic Auth password for the API I(endpoint). This should be set
unless using the C('insecure') option.
aliases: [ password ]
url_username:
description:
- The HTTP Basic Auth username for the API I(endpoint). This should be set
unless using the C('insecure') option.
default: admin
aliases: [ username ]
insecure:
description:
- Reverts the connection to using HTTP instead of HTTPS. This option should
only be used when execuing the M('kubernetes') module local to the Kubernetes
cluster using the insecure local port (locahost:8080 by default).
validate_certs:
description:
- Enable/disable certificate validation. Note that this is set to
C(false) until Ansible can support IP address based certificate
hostname matching (exists in >= python3.5.0).
type: bool
default: 'no'
author:
- Eric Johnson (@erjohnso) <erjohnso@google.com>
'''
EXAMPLES = '''
# Create a new namespace with in-line YAML.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
inline_data:
kind: Namespace
apiVersion: v1
metadata:
name: ansible-test
labels:
label_env: production
label_ver: latest
annotations:
a1: value1
a2: value2
state: present
# Create a new namespace from a YAML file.
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
url_username: admin
url_password: redacted
file_reference: /path/to/create_namespace.yaml
state: present
# Do the same thing, but using the insecure localhost port
- name: Create a kubernetes namespace
kubernetes:
api_endpoint: 123.45.67.89
insecure: true
file_reference: /path/to/create_namespace.yaml
state: present
'''
RETURN = '''
# Example response from creating a Kubernetes Namespace.
api_response:
description: Raw response from Kubernetes API, content varies with API.
returned: success
type: complex
contains:
apiVersion: "v1"
kind: "Namespace"
metadata:
creationTimestamp: "2016-01-04T21:16:32Z"
name: "test-namespace"
resourceVersion: "509635"
selfLink: "/api/v1/namespaces/test-namespace"
uid: "6dbd394e-b328-11e5-9a02-42010af0013a"
spec:
finalizers:
- kubernetes
status:
phase: "Active"
'''
import base64
import json
try:
import yaml
HAS_LIB_YAML = True
except ImportError:
HAS_LIB_YAML = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
############################################################################
############################################################################
# For API coverage, this Anislbe module provides capability to operate on
# all Kubernetes objects that support a "create" call (except for 'Events').
# In order to obtain a valid list of Kubernetes objects, the v1 spec file
# was referenced and the below python script was used to parse the JSON
# spec file, extract only the objects with a description starting with
# 'create a'. The script then iterates over all of these base objects
# to get the endpoint URL and was used to generate the KIND_URL map.
#
# import json
# from urllib2 import urlopen
#
# r = urlopen("https://raw.githubusercontent.com/kubernetes"
# "/kubernetes/master/api/swagger-spec/v1.json")
# v1 = json.load(r)
#
# apis = {}
# for a in v1['apis']:
# p = a['path']
# for o in a['operations']:
# if o["summary"].startswith("create a") and o["type"] != "v1.Event":
# apis[o["type"]] = p
#
# def print_kind_url_map():
# results = []
# for a in apis.keys():
# results.append('"%s": "%s"' % (a[3:].lower(), apis[a]))
# results.sort()
# print("KIND_URL = {")
# print(",\n".join(results))
# print("}")
#
# if __name__ == '__main__':
# print_kind_url_map()
############################################################################
############################################################################
KIND_URL = {
"binding": "/api/v1/namespaces/{namespace}/bindings",
"configmap": "/api/v1/namespaces/{namespace}/configmaps",
"endpoints": "/api/v1/namespaces/{namespace}/endpoints",
"limitrange": "/api/v1/namespaces/{namespace}/limitranges",
"namespace": "/api/v1/namespaces",
"node": "/api/v1/nodes",
"persistentvolume": "/api/v1/persistentvolumes",
"persistentvolumeclaim": "/api/v1/namespaces/{namespace}/persistentvolumeclaims", # NOQA
"pod": "/api/v1/namespaces/{namespace}/pods",
"podtemplate": "/api/v1/namespaces/{namespace}/podtemplates",
"replicationcontroller": "/api/v1/namespaces/{namespace}/replicationcontrollers", # NOQA
"resourcequota": "/api/v1/namespaces/{namespace}/resourcequotas",
"secret": "/api/v1/namespaces/{namespace}/secrets",
"service": "/api/v1/namespaces/{namespace}/services",
"serviceaccount": "/api/v1/namespaces/{namespace}/serviceaccounts",
"daemonset": "/apis/extensions/v1beta1/namespaces/{namespace}/daemonsets",
"deployment": "/apis/extensions/v1beta1/namespaces/{namespace}/deployments",
"horizontalpodautoscaler": "/apis/extensions/v1beta1/namespaces/{namespace}/horizontalpodautoscalers", # NOQA
"ingress": "/apis/extensions/v1beta1/namespaces/{namespace}/ingresses",
"job": "/apis/extensio |
Jycraft/jycraft-web | servedir.py | Python | bsd-3-clause | 212 | 0.004717 | import Simp | leHTTPServer
import SocketServer
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at | port", PORT
httpd.serve_forever() |
joakim-hove/ert | res/simulator/simulation_context.py | Python | gpl-3.0 | 6,507 | 0.000768 | from res.job_queue import JobQueueManager, ForwardModelStatus
from res.enkf import ErtRunContext, EnkfSimulationRunner
from res.enkf.enums import EnkfRunType, HookRuntime
from threading import Thread
from time import sleep
class SimulationContext(object):
def __init__(self, ert, sim_fs, mask, itr, case_data):
self._ert = ert
""" :type: res.enkf.EnKFMain """
max_runtime = ert.analysisConfig().get_max_runtime()
self._mask = mask
job_queue = ert.get_queue_config().create_job_queue()
job_queue.set_max_job_duration(max_runtime)
self._queue_manager = JobQueueManager(job_queue)
subst_list = self._ert.getDataKW()
path_fmt = self._ert.getModelConfig().getRunpathFormat()
jobname_fmt = self._ert.getModelConfig().getJobnameFormat()
self._run_context = ErtRunContext(
EnkfRunType.ENSEMBLE_EXPERIMENT,
sim_fs,
None,
mask,
path_fmt,
jobname_fmt,
subst_list,
itr,
)
# fill in the missing geo_id data
for sim_id, (geo_id, _) in enumerate(case_data):
if mask[sim_id]:
run_arg = self._run_context[sim_id]
run_arg.geo_id = geo_id
self._ert.getEnkfSimulationRunner().createRunPath(self._run_context)
EnkfSimulationRunner.runWorkflows(HookRuntime.PRE_SIMULATION, self._ert)
self._sim_thread = self._run_simulations_simple_step()
# Wait until the queue is active before we finish the creation
# to ensure sane job status while running
while self.isRunning() and not self._queue_manager.isRunning():
sleep(0.1)
def get_run_args(self, iens):
"""
raises an exception if no iens simulation found
:param iens: realization number
:return: run_args for the realization
"""
for run_arg in self._run_context:
if run_arg is not None and run_arg.iens == iens:
return run_arg
raise KeyError("No such simulation: %s" % iens)
def _run_simulations_simple_step(self):
sim_thread = Thread(
target=lambda: self._ert.getEnkfSimulationRunner().runSimpleStep(
self._queue_manager.queue, self._run_context
)
)
sim_thread.start()
return sim_thread
def __len__(self):
return self._mask.count()
def isRunning(self):
# TODO: Should separate between running jobs and having loaded all data
return self._si | m_thread.is_alive() or self._queue_manager.isRunning()
def getNumPending(self):
return self._queue_manager.getNumPending()
def getNumRunning(self):
return self._queue_manager.getNumRunning()
def getNumSuccess(self):
return self._queue_manager.getNumSuccess()
def getNumFailed(self):
return self._queue_manager.get | NumFailed()
def getNumWaiting(self):
return self._queue_manager.getNumWaiting()
def didRealizationSucceed(self, iens):
queue_index = self.get_run_args(iens).getQueueIndex()
return self._queue_manager.didJobSucceed(queue_index)
def didRealizationFail(self, iens):
# For the purposes of this class, a failure should be anything (killed job, etc) that is not an explicit success.
return not self.didRealizationSucceed(iens)
def isRealizationQueued(self, iens):
# an exception will be raised if it's not queued
self.get_run_args(iens)
return True
def isRealizationFinished(self, iens):
run_arg = self.get_run_args(iens)
if run_arg.isSubmitted():
queue_index = run_arg.getQueueIndex()
return self._queue_manager.isJobComplete(queue_index)
else:
return False
def __repr__(self):
running = "running" if self.isRunning() else "not running"
numRunn = self.getNumRunning()
numSucc = self.getNumSuccess()
numFail = self.getNumFailed()
numWait = self.getNumWaiting()
fmt = "%s, #running = %d, #success = %d, #failed = %d, #waiting = %d"
fmt = fmt % (running, numRunn, numSucc, numFail, numWait)
return "SimulationContext(%s)" % fmt
def get_sim_fs(self):
return self._run_context.get_sim_fs()
def get_run_context(self):
return self._run_context
def stop(self):
self._queue_manager.stop_queue()
self._sim_thread.join()
def job_progress(self, iens):
    """Return a detailed progress report for realization *iens*, or None.

    The progress report is obtained by reading a file from the filesystem;
    that file is typically created by another process running on another
    machine, and reading might fail due to NFS issues, simultaneous writes
    and so on. If loading valid json fails, ForwardModelStatus.load sleeps
    0.10 seconds and retries - eventually giving up and returning None.
    Jobs which have not yet started (not submitted, or still waiting) also
    yield None.

    On success the return value is an object with properties like:

        progress.start_time
        progress.end_time
        progress.run_id
        progress.jobs = [(job1.name, job1.start_time, job1.end_time,
                          job1.status, job1.error_msg),
                         ...
                         (jobN.name, jobN.start_time, jobN.end_time,
                          jobN.status, jobN.error_msg)]
    """
    run_arg = self.get_run_args(iens)
    try:
        # will throw if not yet submitted (is in a limbo state)
        queue_index = run_arg.getQueueIndex()
    except ValueError:
        return None
    if self._queue_manager.isJobWaiting(queue_index):
        return None
    return ForwardModelStatus.load(run_arg.runpath)
def run_path(self, iens):
    """
    Will return the path to the simulation.
    """
    return self.get_run_args(iens).runpath
def job_status(self, iens):
    """Will query the queue system for the status of the job.

    Returns None for realizations that have not been submitted yet.
    """
    run_arg = self.get_run_args(iens)
    try:
        queue_index = run_arg.getQueueIndex()
    except ValueError:
        return None
    return self._queue_manager.getJobStatus(queue_index)
|
dims/nova | nova/cmd/api_metadata.py | Python | apache-2.0 | 1,706 | 0 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License | .
"""Starter script for Nova Metadata API."""
import sys
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.conducto | r import rpcapi as conductor_rpcapi
import nova.conf
from nova import config
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils
from nova import version
CONF = nova.conf.CONF
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
    """Entry point: configure and serve the Nova Metadata API WSGI service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    utils.monkey_patch()
    # Register all versioned objects before any RPC/DB traffic happens.
    objects.register_all()
    # Enable guru-meditation (error report) dumps for this process.
    gmr.TextGuruMeditation.setup_autorun(version)
    if not CONF.conductor.use_local:
        # Route object persistence through the conductor service instead of
        # direct database access.
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    should_use_ssl = 'metadata' in CONF.enabled_ssl_apis
    server = service.WSGIService('metadata', use_ssl=should_use_ssl)
    service.serve(server, workers=server.workers)
    service.wait()
|
jbaber/bup | lib/bup/hashsplit.py | Python | lgpl-2.1 | 7,757 | 0.002578 | import math, os
from bup import _helpers, helpers
from bup.helpers import sc_page_size
_fmincore = getattr(helpers, 'fmincore', None)
BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16
GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal
# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat(). We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    """Zero-copy-on-read byte buffer (Python 2: uses the builtin buffer()).

    peek()/get()/eat() never copy; put() copies the not-yet-consumed tail
    once, which is cheap as long as put() is called with large chunks.
    """
    def __init__(self):
        self.data = ''       # backing bytes (or buffer) currently held
        self.start = 0       # offset of the first unconsumed byte
    def put(self, s):
        # Drop the already-consumed prefix and append the new data.
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0
    def peek(self, count):
        # Zero-copy view of up to *count* unconsumed bytes.
        return buffer(self.data, self.start, count)
    def eat(self, count):
        # Mark *count* bytes as consumed without returning them.
        self.start += count
    def get(self, count):
        # peek() + eat() in one step.
        v = buffer(self.data, self.start, count)
        self.start += count
        return v
    def used(self):
        # Number of unconsumed bytes currently buffered.
        return len(self.data) - self.start
def _fadvise_pages_done(fd, first_page, count):
    """Advise the kernel that *count* pages of *fd*, starting at page
    *first_page*, are no longer needed (so it may evict them)."""
    assert(first_page >= 0)
    assert(count >= 0)
    if not count:
        return
    offset = first_page * sc_page_size
    length = count * sc_page_size
    _helpers.fadvise_done(fd, offset, length)
def _nonresident_page_regions(status_bytes, max_region_len=None):
    """Yield (start_page, count) pairs, in ascending start_page order, for
    each contiguous region of nonresident pages indicated by the mincore()
    status_bytes, capping each region at max_region_len pages."""
    assert(max_region_len is None or max_region_len > 0)
    run_start = None
    for page, status in enumerate(status_bytes):
        resident = status & helpers.MINCORE_INCORE
        if run_start is None:
            # Not inside a nonresident run; start one on the first
            # nonresident page we see.
            if not resident:
                run_start = page
            continue
        run_len = page - run_start
        if resident:
            # Run ended: emit it and wait for the next nonresident page.
            yield (run_start, run_len)
            run_start = None
        elif max_region_len and run_len >= max_region_len:
            # Run hit the cap: emit the full-size region and continue a
            # fresh run from the current page.
            yield (run_start, run_len)
            run_start = page
    if run_start is not None:
        # Flush a run that extends to the end of the mapping.
        yield (run_start, len(status_bytes) - run_start)
def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    """Uncache the pages of fd indicated by first_region and
    remaining_regions that lie entirely before *offset*; each region is a
    (start_page, count) pair and the final region has start_page None.
    Returns the first region not yet fully below offset."""
    start, count = first_region
    # A region is finished once its last byte falls below offset.
    while start is not None and (start + count) * sc_page_size <= offset:
        _fadvise_pages_done(fd, start, count)
        start, count = next(remaining_regions, (None, None))
    return (start, count)
def readfile_iter(files, progress=None):
    """Yield BLOB_READ_SIZE chunks from each file in *files*.

    If the fmincore helper is available and the file has a real fd, pages
    we have already read are dropped from the OS page cache as we go, so a
    large backup does not evict the rest of the cache.

    progress, if given, is called as progress(filenum, nbytes_just_read).
    """
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        fd = rpr = rstart = rlen = None
        if _fmincore and hasattr(f, 'fileno'):
            fd = f.fileno()
            # Uncache in chunks of at most 8MB worth of pages at a time.
            max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size)
            rpr = _nonresident_page_regions(_fmincore(fd), max_chunk)
            rstart, rlen = next(rpr, (None, None))
        while 1:
            if progress:
                # Reports the size of the *previous* read (0 on first pass).
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if rpr:
                rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
            if not b:
                break
            yield b
        if rpr:
            # Flush any remaining fully-read regions at EOF.
            rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
def _splitbuf(buf, basebits, fanbits):
    """Yield (chunk, level) pairs carved out of *buf* by the rolling hash.

    level encodes how many extra fanout bits matched at the split point
    (used to decide tree depth); chunks forced out at BLOB_MAX get level 0.
    Leaves any un-splittable remainder (< BLOB_MAX) in buf for later.
    Python 2 only: returns buffer() views into buf's storage.
    """
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                # Force a split: never emit a blob bigger than BLOB_MAX.
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
def _hashsplit_iter(files, progress):
    """Hash-split the concatenated content of *files*; yield (chunk, level).

    The rolling split runs across file boundaries (see
    _hashsplit_iter_keep_boundaries for the per-file variant).
    """
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    # 0 would make level computation divide by zero; treat it as 128.
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        # Final partial chunk that never reached a split point.
        yield buf.get(buf.used()), 0
def _hashsplit_iter_keep_boundaries(files, progress):
    """Like _hashsplit_iter, but restart the splitter at every file
    boundary by splitting each file independently."""
    for real_index, f in enumerate(files):
        prog = None
        if progress:
            def prog(_filenum, nbytes, _index=real_index):
                # The inner iterator only ever sees one file, so its
                # filenum is always 0; substitute the real index here.
                return progress(_index, nbytes)
        for chunk_and_level in _hashsplit_iter([f], progress=prog):
            yield chunk_and_level
def hashsplit_iter(files, keep_boundaries, progress):
    """Yield (chunk, level) pairs for *files*, optionally restarting the
    rolling split at each file boundary."""
    splitter = (_hashsplit_iter_keep_boundaries if keep_boundaries
                else _hashsplit_iter)
    return splitter(files, progress)
total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    """Hash-split *files* and store each chunk via makeblob(chunk).

    Yields (sha, size, level) per chunk. Also updates the module-level
    total_split counter and invokes progress_callback(size) if set.
    """
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)
def _make_shalist(l):
ofs = 0
l = list(l)
total = sum(size for mode,sha,size, in l)
vlen = len('%x' % total)
shalist = []
for (mode, sha, size) in l:
shalist.append((mode, '%0*x' % (vlen,ofs), sha))
ofs += size
assert(ofs == total)
return (shalist, total)
def _squish(maketree, stacks, n):
    """Collapse the per-level pending chunk stacks up to level *n*.

    stacks[i] holds (mode, sha, size) entries waiting at tree level i.
    Each non-trivial level is turned into a git tree (via maketree) and
    pushed one level up; single entries are promoted as-is. Levels that
    exceed MAX_PER_TREE are squished even above *n*.
    Mutates *stacks* in place (may append new levels).
    """
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            # A lone entry needs no intermediate tree; promote it directly.
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1
def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    """Hash-split *files* into blobs and fold them into a fanout tree.

    Returns the shalist for the top-level tree (or a flat list of file
    entries when fanout is disabled). makeblob/maketree are callbacks that
    persist a blob/tree and return its id.
    """
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    assert(fanout != 0)
    if not fanout:
        # Dead branch given the assert above; kept from the original.
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            # Fold completed subtrees up to this chunk's split level.
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        # Final squish collapses everything into the top stack.
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
    """Split *files* and return (git_mode, id): a blob when the content
    fits in a single chunk (or is empty), a tree otherwise."""
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    if not shalist:
        # Empty input is represented as an empty blob.
        return (GIT_MODE_FILE, makeblob(''))
    if len(shalist) == 1:
        mode, _, sha = shalist[0]
        return (mode, sha)
    return (GIT_MODE_TREE, maketree(shalist))
def open_noatime(name):
    """Open *name* read-only without updating its access time.

    If wrapping the raw fd in a buffered file object fails, the fd is
    closed (best effort) before re-raising, so no descriptor leaks.
    """
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            # Already failing; ignore secondary close errors.
            pass
        raise
|
Feawel/MachineLearningProject | demo_good.py | Python | mit | 3,204 | 0.01623 | # -*- coding: utf-8 -*-
import collections, itertools
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews, stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
def evaluate_classifier(featx):
    """Train a NaiveBayes sentiment classifier on movie_reviews and print
    accuracy, per-class precision/recall and the top features.

    featx: callable mapping a token sequence to a feature dict.
    Python 2 code (print statements, integer division in the 3/4 split).
    """
    negids = movie_reviews.fileids('neg')
    posids = movie_reviews.fileids('pos')
    negfeats = [(featx(movie_reviews.words(fileids=[f])), 'neg') for f in negids]
    posfeats = [(featx(movie_reviews.words(fileids=[f])), 'pos') for f in posids]
    # 75/25 train/test split per class.
    negcutoff = len(negfeats)*3/4
    poscutoff = len(posfeats)*3/4
    trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
    testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
    classifier = NaiveBayesClassifier.train(trainfeats)
    # refsets: true labels -> doc ids; testsets: predicted labels -> doc ids.
    refsets = collections.defaultdict(set)
    testsets = collections.defaultdict(set)
    for i, (feats, label) in enumerate(testfeats):
        refsets[label].add(i)
        observed = classifier.classify(feats)
        testsets[observed].add(i)
    print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
    print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
    print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
    print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
    print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
    classifier.show_most_informative_features()
def word_feats(words):
    """Bag-of-words features: map every token to True."""
    return {word: True for word in words}
print 'evaluating single word features'
evaluate_classifier(word_feats)
# Module-level feature selection: score every word by chi-squared
# association with the pos/neg labels and keep the top 10000 as
# `bestwords` (used by best_word_feats below). Python 2 only
# (FreqDist.inc, iteritems, tuple-unpacking lambda).
word_fd = FreqDist()
label_word_fd = ConditionalFreqDist()
for word in movie_reviews.words(categories=['pos']):
    word_fd.inc(word.lower())
    label_word_fd['pos'].inc(word.lower())
for word in movie_reviews.words(categories=['neg']):
    word_fd.inc(word.lower())
    label_word_fd['neg'].inc(word.lower())
# n_ii = label_word_fd[label][word]
# n_ix = word_fd[word]
# n_xi = label_word_fd[label].N()
# n_xx = label_word_fd.N()
pos_word_count = label_word_fd['pos'].N()
neg_word_count = label_word_fd['neg'].N()
total_word_count = pos_word_count + neg_word_count
word_scores = {}
for word, freq in word_fd.iteritems():
    # Chi-squared of the word against each class; the sum is the
    # word's overall informativeness.
    pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],
        (freq, pos_word_count), total_word_count)
    neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
        (freq, neg_word_count), total_word_count)
    word_scores[word] = pos_score + neg_score
best = sorted(word_scores.iteritems(), key=lambda (w,s): s, reverse=True)[:10000]
bestwords = set([w for w, s in best])
def best_word_feats(words):
    """Bag-of-words features restricted to the precomputed high-chi-sq
    vocabulary (module-level `bestwords`)."""
    return {word: True for word in words if word in bestwords}
print 'evaluating best word features'
evaluate_classifier(best_word_feats)
def best_bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200):
    """Features = top-*n* collocation bigrams (ranked by *score_fn*) plus
    the chi-sq-filtered unigrams from best_word_feats."""
    bigram_finder = BigramCollocationFinder.from_words(words)
    bigrams = bigram_finder.nbest(score_fn, n)
    d = dict([(bigram, True) for bigram in bigrams])
    d.update(best_word_feats(words))
    return d
print 'evaluating best words + bigram chi_sq word features'
evaluate_classifier(best_bigram_word_feats) |
meissnert/StarCluster-Plugins | Pysam_0_8_4.py | Python | mit | 905 | 0.022099 | from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class PysamInstaller(ClusterSetup):
    """StarCluster plugin: install pysam 0.8.4 on every node and publish
    it as an environment module under /usr/local/Modules."""

    def run(self, nodes, master, user, user_shell, volumes):
        for node in nodes:
            log.info("Installing PySam 0.8.4 on %s" % (node.alias))
            node.ssh.execute('mkdir -p /opt/software/pysam')
            # Fixed: the install target previously carried a stray
            # Windows-style 'd:\' prefix ('--target=d:\/opt/...'), which
            # made pip install into a literal 'd:\' directory on the Linux
            # node. Also pin the version to 0.8.4 to match the module file
            # (the command previously installed whatever was latest).
            node.ssh.execute('pip install --target=/opt/software/pysam pysam==0.8.4')  # https://github.com/pysam-developers/pysam/archive/v0.8.4.tar.gz
            node.ssh.execute('mkdir -p /usr/local/Modules/applications/pysam/;touch /usr/local/Modules/applications/pysam/0.8.4')
            node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/pysam/0.8.4')
            # NOTE(review): pip --target installs straight into
            # /opt/software/pysam (no pysam-0.8.4 subdirectory), so this
            # $root probably points one level too deep -- confirm on a node.
            node.ssh.execute('echo "set root /opt/software/pysam/pysam-0.8.4" >> /usr/local/Modules/applications/pysam/0.8.4')
            # NOTE(review): prepending a Python package directory to PATH
            # (rather than PYTHONPATH) looks wrong for an importable
            # library -- verify before relying on `module load pysam`.
            node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/pysam/0.8.4')
|
drzax/databot | app/config.py | Python | mit | 1,253 | 0.003192 | """ file: config.py (syns)
author: Jess Robertson
description: Config file for running Flask app, lifted and lightly modified from
Miguel's 'Flask Web Development' book.
"""
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment."""

    # Flask session secret; falls back to a hard-coded development value
    # when the SECRET_KEY environment variable is unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'something great goes in here'

    # Key for storieswithdata
    SLACK_API_KEY = "8s0OQPVpz1I85Km5rRtNI0ff"

    # Redis list settings for stored measurements.
    MEASUREMENT_LIST_KEY_PREFIX = 'measurements'
    MEASUREMENT_LIST_MAX_LENGTH = 100

    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialisation; no-op here.
        pass
class DevelopmentConfig(Config):
    """Settings for local development."""
    REDIS_URL = "redis://localhost:6379/0"
    USE_UUID_NAMESPACE = False
    DEBUG = True
class TestingConfig(Config):
    """Settings for the test suite."""
    REDIS_URL = "redis://localhost:6379/0"
    TESTING = True
class ProductionConfig(Config):
    """Settings for production deployments."""
    REDIS_URL = "redis://localhost:6379/0"
# Name -> configuration class map used by the application factory.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
android-ia/platform_external_chromium_org | tools/telemetry/telemetry/core/platform/android_device.py | Python | bsd-3-clause | 2,217 | 0.004511 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import util
from telemetry.core.backends import adb_commands
from telemetry.core.platform import device
from telemetry.core.platform.profiler import monsoon
class AndroidDevice(device.Device):
  """ Class represents information for connecting to an android device.

  Attributes:
    device_id: the device's serial string created by adb to uniquely
      identify an emulator/device instance. This string can be found by
      running the 'adb devices' command.
    enable_performance_mode: when this is set to True, android platform will
      be set to high performance mode after browser is started.
  """
  def __init__(self, device_id, enable_performance_mode=False):
    super(AndroidDevice, self).__init__(
        name='Android device %s' % device_id, guid=device_id)
    self._device_id = device_id
    self._enable_performance_mode = enable_performance_mode

  @classmethod
  def GetAllConnectedDevices(cls):
    """Return an AndroidDevice for every adb-visible device.

    If none are attached but a Monsoon power monitor is present, power the
    device through the Monsoon first and wait (up to 10 minutes) for it to
    appear on adb. Returns [] when no Monsoon is reachable either.
    """
    device_serials = adb_commands.GetAttachedDevices()
    # The monsoon provides power for the device, so for devices with no
    # real battery, we need to turn them on after the monsoon enables voltage
    # output to the device.
    if not device_serials:
      try:
        m = monsoon.Monsoon(wait=False)
        m.SetUsbPassthrough(1)
        m.SetVoltage(3.8)
        m.SetMaxCurrent(8)
        logging.warn("""
Monsoon power monitor detected, but no Android devices.

The Monsoon's power output has been enabled. Please now ensure that:

  1. The Monsoon's front and back USB are connected to the host.
  2. The device is connected to the Monsoon's main and USB channels.
  3. The device is turned on.

Waiting for device...
""")
        util.WaitFor(adb_commands.GetAttachedDevices, 600)
        device_serials = adb_commands.GetAttachedDevices()
      except IOError:
        # No Monsoon attached (serial port unavailable).
        return []
    return [AndroidDevice(s) for s in device_serials]

  @property
  def device_id(self):
    # adb serial string identifying this device.
    return self._device_id

  @property
  def enable_performance_mode(self):
    # Whether to switch the platform to high performance mode after launch.
    return self._enable_performance_mode
|
ddcatgg/dglib | dglib/qt/qt_utils2.py | Python | mit | 27,371 | 0.028785 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import re
import time
import threading
import win32process
import win32con
import functools
from PyQt4 import QtGui, QtCore
import sip
UM_SHOW = win32con.WM_USER + 100
__all__ = ['UM_SHOW', 'TrayMixIn', 'QssMixIn', 'EmitCallMixIn', 'EmitCallDecorator',
'ThreadingInvokeStubInMainThread', 'threadingInvokeStubInMainThread', 'callFromThread',
'callFromThread_wrap', 'RowHeightItemDelegateMixIn', 'HighlightFixItemDelegateMixIn',
'MixedItemDelegate', 'QssMsgBox', 'QssInputBox', 'AnimationImgMixIn', 'QClickableLabel',
'QDoubleClickableLabel', 'GET_X_LPARAM', 'GET_Y_LPARAM', 'check_resize_pos', 'msgbox',
'inputbox', 'resolve_xp_font_problem', 'center_window', 'move_center',
'move_rightbottom', 'center_to', 'change_widget_class', 'gbk', 'utf8', 'uni', 'uni8',
'loadqss']
class TrayMixIn(object):
    """Mixin adding a system-tray icon with a context menu to a Qt window.

    Expects the host class to be a QWidget/QMainWindow and `app` to be the
    QApplication. Closing/minimizing the window hides it to the tray.
    """
    def __init__(self, app, title, style=1):
        self.app = app
        self.title = title
        self.style = style  # 0 = menu with minimize/restore items, 1 = menu with a greyed-out title item
        # Set up the tray icon and its menu actions.
        self.create_trayactions()
        self.create_trayicon()
        app.installEventFilter(self)
    def center_window(self, width, height):
        # Delegate to the module-level helper of the same name.
        center_window(self, width, height)
    def create_trayactions(self):
        """Create the QActions used by the tray context menu."""
        self.actTitle = QtGui.QAction(self.title, self)
        self.actTitle.setEnabled(False)
        self.actMinimize = QtGui.QAction('最小化', self)
        self.connect(self.actMinimize, QtCore.SIGNAL('triggered()'), QtCore.SLOT('hide()'))
        self.actRestore = QtGui.QAction('显示窗体', self)
        self.connect(self.actRestore, QtCore.SIGNAL('triggered()'), QtCore.SLOT('show()'))
        self.actQuit = QtGui.QAction('退出', self)
        self.connect(self.actQuit, QtCore.SIGNAL('triggered()'), self.on_actQuit_triggered)
    def create_trayicon(self):
        """Build the tray menu/icon and show it."""
        self.mnuTray = QtGui.QMenu()
        self.mnuTray.setStyleSheet('font: 9pt "宋体";')
        if self.style == 0:
            self.mnuTray.addAction(self.actMinimize)
            self.mnuTray.addAction(self.actRestore)
        else:
            self.mnuTray.addAction(self.actTitle)
        self.mnuTray.addSeparator()
        self.mnuTray.addAction(self.actQuit)
        self.trayIcon = QtGui.QSystemTrayIcon(self)
        self.trayIcon.setContextMenu(self.mnuTray)
        self.connect(self.trayIcon, QtCore.SIGNAL('activated(QSystemTrayIcon::ActivationReason)'),
            self.trayIconActivated)
        self.icon = QtGui.QIcon(':/images/images/icon.png')
        self.trayIcon.setIcon(self.icon)
        self.trayIcon.setToolTip(self.windowTitle())
        self.trayIcon.show()
        self.setWindowIcon(self.icon)
    def enable_trayicon(self, enable=True):
        '''
        Enable or disable the tray context menu (disable it to keep the
        user from clicking actions repeatedly).
        @param enable: whether the menu is enabled
        '''
        self.trayIcon.setContextMenu(self.mnuTray if enable else None)
    def trayIconActivated(self, reason):
        '''
        Called when the user activates the tray icon.
        @param reason: the mouse activation reason (double click, single click, ...)
        '''
        if reason == QtGui.QSystemTrayIcon.DoubleClick:
            state = int(self.windowState())
            if state & QtCore.Qt.WindowMaximized:
                self.showMaximized()
            elif state & QtCore.Qt.WindowFullScreen:
                self.showFullScreen()
            else:
                self.showNormal()
            self.raise_()
            self.activateWindow()
    @QtCore.pyqtSignature('')
    def on_actQuit_triggered(self):
        '''
        Menu action: quit (with confirmation).
        '''
        if msgbox(self, '真的要退出吗?', title=self.title, question=True):
            self.quit()
    def closeEvent(self, event):
        '''
        Minimize to tray instead of closing when the title-bar close
        button is pressed.
        '''
        event.ignore()
        self.hide()
    def eventFilter(self, target, event):
        if event.type() == QtCore.QEvent.WindowStateChange and self.isMinimized():
            # Hide the window (minimize-to-tray); deferred via a 0ms timer.
            QtCore.QTimer.singleShot(0, self, QtCore.SLOT('hide()'))
            return True
        return self.eventFilter2(target, event)
    def eventFilter2(self, target, event):
        '''
        Hook that subclasses may override to extend event filtering.
        '''
        return self.app.eventFilter(target, event)
    def winEvent(self, msg):
        # UM_SHOW is posted by another process/instance to bring us forward.
        if msg.message == UM_SHOW:
            print 'UM_SHOW'
            self.show()
            self.raise_()
            self.activateWindow()
        return False, 0
    def quit(self, force=False):
        """Hide the tray icon and exit; force=True hard-exits the process."""
        self.trayIcon.setToolTip('正在退出...')
        self.hide()
        self.trayIcon.hide()
        if force:
            win32process.ExitProcess(0)  # hard exit, avoids a slow shutdown
        self.app.exit()
class QssMixIn(object):
'''
这个类帮助在使用QSS美化界面的时候,自动处理标题栏最大化/最小化按钮事件和状态,支持和TrayMixIn一起使用。
标题栏必须是一个命名为fraTitleBar的QFrame,其中有4个按钮btHelp、btMin、btMax、btClose。
隐藏btMax可以禁止缩放/最大化窗口。
'''
def __init__(self, app, qss_file):
self.app = app
self.qss_file = qss_file
self.qss_enabled = True
self.qss_encoding = 'gbk'
# 初始化标题栏按钮提示
self.btHelp.setToolTip('帮助')
self.btMin.setToolTip('最小化')
self.btMax.setToolTip('最大化')
self.btClose.setToolTip('关闭')
# 去除标题栏按钮的焦点
self.btHelp.setFocusPolicy(QtCore.Qt.NoFocus)
self.btMin.setFocusPolicy(QtCore.Qt.NoFocus)
self.btMax.setFocusPolicy(QtCore.Qt.NoFocus)
self.btClose.setFocusPolicy(QtCore.Qt.NoFocus)
# 挂接winEvent
self.prv_winEvent = getattr(self, 'winEvent', None)
self.winEvent = self.qss_winEvent
# 挂接eventFilter
self.prv_eventFilter = getattr(self, 'eventFilter', None)
self.eventFilter = self.qss_eventFilter
self.app.installEventFilter(self)
def qss_avalible(self, qss_file=''):
if not qss_file:
qss_file = self.qss_file
return os.path.isfile(qss_file)
def qss_apply_style(self, qss_file=''):
if not qss_file:
qss_file = self.qss_file
if self.qss_avalible(qss_file):
# 设置窗体为无标题栏无边框样式
if self.windowFlags() & QtCore.Qt.FramelessWindowHint != QtCore.Qt.FramelessWindowHint:
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
QtGui.qApp.setStyleSheet(unicode(open(qss_file).read(), self.qss_encoding))
@QtCore.pyqtSignature('')
def on_btHelp_clicked(self):
self.qss_apply_style()
@QtCore.pyqtSignature('')
def on_btMin_clicked(self):
self.close()
@QtCore.pyqtSignature('')
def on_btMax_clicked(self):
if self.btMax.objectName() == 'btMax':
self.showMaximized()
else:
self.showNormal()
@QtCore.pyqtSignature('')
def on_btClose_clicked(self):
self.close()
def qss_allow_maximize(self):
return self.btMax.isVisible()
def qss_winEvent(self, msg):
if self.qss_enabled:
if msg.message == win32con.WM_NCHITTEST:
x | = GET_X_LPARAM(msg.lParam) - self.frameGeometry().x()
y = GET_Y_LPARAM(msg.lParam) - self.frameGeometry().y()
# 判断是否RESIZE区域
is_rszpos = check_resize_pos(self, x, y)
if is_rszpos and self.qss_allow_maximize() and not self.isMaximized():
return True, is_rszpos
# 标题栏区域为扩展后的fraTop区域(包括窗口的2个像素边框)
rect = self.fraTitleBar.geometry()
rect.setTop(0)
rect.setLef | t(0)
rect.setWidth(self.width())
# 判断标题栏区域时排除标题栏的按钮
if rect.contains(x, y) and \
not isinstance(self.childAt(x, y), QtGui.QPushButton):
# 如果窗口已经最大化,为避免被拖动,必须返回不在非客户区。
# 同时为了实现双击标题栏还原,需要处理 WM_LBUTTONDBLCLK。
if not self.isMaximized():
return True, win32con.HTCAPTION
elif msg.message == win32con.WM_NCLBUTTONDBLCLK:
if self.qss_allow_maximize():
# 最大化 <-> 还原
if self.isMaximized():
self.showNormal()
else:
x = GET_X_LPARAM(msg.lParam) - self.frameGeometry().x()
y = GET_Y_LPARAM(msg.lParam) - self.frameGeometry().y()
# 判断是否RESIZE区域
is_rszpos = check_resize_pos(self, x, y)
if not is_rszpos:
self.showMaximized()
return True, 0
# 实现窗口最大化后可双击标题栏还原
elif msg.message == win32con.WM_LBUTTONDBLCLK:
if self.qss_allow_maximize():
x = GET_X_LPARAM(msg.lParam) - self.frameGeometry().x()
y = GET_Y_LPARAM(msg.lParam) - self.frameGeometry().y()
if self.isMaximized():
# 标题栏区域为扩展后的fraTop区域(包括窗口的2个像素边框)
rect = self.fraTitleBar.geometry()
rect.setTop(0)
rect.setLeft(0)
rect.setWidth(self.width())
# 判断标题栏区域时排除标题栏的按钮
if rect.contains(x, y) and \
not isinstance(self.childAt(x, y), QtGui.QPushButton):
self.showNormal()
return True, 0
if self.prv_winEvent:
return self.prv_winEvent(msg)
return False, 0
def qss_eventFilter(self, target, event):
if self.qss_enabled:
if event.type() == QtCore.QEvent.WindowStateChange:
if self.isMaximized():
# 修正最大化后按钮状态仍然是hover的问题
ev = QtGu |
RobertoMaurizzi/waliki | waliki/management/commands/moin_migration_cleanup.py | Python | bsd-3-clause | 6,418 | 0.00187 | import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
    """Remove MoinMoin metadata lines ('#...') from the top of the file.

    Returns the content from the first non-metadata line onward. Fixes an
    edge case in the original loop: when *every* line started with '#'
    (or the loop body never hit `break`), the last metadata line was
    returned instead of an empty string.
    """
    lines = rst_content.split('\n')
    for i, line in enumerate(lines):
        if not line.startswith('#'):
            return '\n'.join(lines[i:])
    return ''
def delete_relative_links(rst_content):
    """Remove relative-link target lines (e.g. '.. name: ../Page').

    Waliki resolves such links implicitly, so the explicit targets are
    dropped. Fixes the original pattern `^(\\.\\. .*: \\.\\./.*)\\n$`:
    under re.MULTILINE, `\\n$` only matches when the line is followed by
    a blank line or end-of-string, so most matching lines were silently
    kept. Match the whole line (and its trailing newline) instead.
    """
    return re.sub(r'^\.\. .*: \.\./.*$\n?', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
    """Rewrite `attachment:<name>`_ references as links to the imported
    Attachment objects belonging to the page *slug*.

    Unresolvable attachments are reported (Python 2 print) and the
    reference is replaced with None by re.sub semantics, i.e. left broken.
    """
    def rep(matchobj):
        for filename in matchobj.groups(1):
            try:
                # First attachment whose filename matches, scoped to the page.
                a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
            except IndexError:
                print('Cant find %s in %s' % (filename, slug))
                return None
            return '`%s <%s>`_' % (filename, a.get_absolute_url())
    return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
    # For every custom role used in the text (:name:`...`), append a
    # matching `.. role::` definition so docutils can render it with a
    # CSS class of the same name.
    for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
        rst_content += """
.. role:: {directive}
    :class: {directive}
""".format(directive=directive)
    return rst_content
def emojis(rst_content):
    """Convert moin-style emoticon substitutions (|:)|, |;-)| ...) into
    emoji substitution names (|smile|, |wink| ...); unknown emoticon
    substitutions are removed entirely."""
    emoticon_names = {
        ':)': 'smile',
        ':-)': 'smile',
        ';)': 'wink',
        ';-)': 'wink',
        ':-?': 'smirk',
        ':?': 'smirk',
        ':(': 'confused',
        ':-(': 'confused',
        ':D': 'laughing',
        ':-D': 'laughing',
        ':-P': 'stuck_out_tongue_closed_eyes',
        ':P': 'stuck_out_tongue_closed_eyes',
        ":'(": 'cry',
        ":'-(": 'cry',
    }

    def substitute(match):
        name = emoticon_names.get(match.group(1), '')
        # Unmapped emoticons are dropped rather than left verbatim.
        return '|%s|' % name if name else ''

    return re.sub(r'\|((?:\:|;).{1,3})\|', substitute, rst_content,
                  flags=re.MULTILINE)
def email(rst_content):
    """Turn moin MailTo macro links (`[[MailTo(a@b)]]`_, optionally
    followed by '..') into inline literals: ``a@b``."""
    mailto_re = re.compile(r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?')
    return mailto_re.sub(r'``\1``', rst_content)
def title_level(rst_content):
    """Replace '~'-only underline lines with '-' underlines of equal
    length (demotes moin's title levels to waliki's convention)."""
    def to_dashes(match):
        return '-' * len(match.group(0))
    return re.sub(r'^~+$', to_dashes, rst_content, flags=re.MULTILINE)
def code(rst_content):
    """Convert moin-exported highlighted code (raw-html span soup inside a
    literal block) back into proper reST using Pandoc.

    No-op when the `sh`-wrapped pandoc binary is unavailable.
    """
    if not pandoc:
        return rst_content
    pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
    def convert(match):
        source = match.groups()[0]
        # Normalise indentation, wrap in <pre> and let pandoc re-emit reST.
        source = '\n'.join(l.strip() for l in source.split('\n'))
        source = "<pre>%s</pre>" % source
        rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
        # rst_source = rst_source.strip().replace('\n', '\n    ') + '\n'
        return rst_source
    result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
    return result
class Command(BaseCommand):
    """Management command that applies cleanup filters to pages imported
    from MoinMoin (moin2git), saving and signalling only changed pages."""
    help = 'Cleanup filters for a moin2git import'
    option_list = (
        make_option('--limit-to',
                    dest='slug',
                    default='',
                    help="optional namespace"),
        make_option('--filters',
                    dest='filters',
                    default='all',
                    help="comma separated list of filter functions to apply"),
        make_option('--message',
                    dest='message',
                    default=_("RestructuredText clean up"),
                    help="log message"),
    ) + BaseCommand.option_list

    def handle(self, *args, **options):
        """Validate the filter list, then run each selected filter over
        every matching page; persist and emit page_saved on change."""
        valid_filters = ['meta', 'links',
                         'attachments', 'directives',
                         'emojis', 'title', 'email', 'code', 'title_level']
        slug = options['slug']
        filters = options['filters']
        if filters == 'all':
            filters = valid_filters
        else:
            filters = [f.strip() for f in filters.split(',')]
            if not set(filters).issubset(valid_filters):
                valid = get_text_list(valid_filters, 'and')
                raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
        if slug:
            pages = Page.objects.filter(slug__startswith=slug)
        else:
            pages = Page.objects.all()
        for page in pages:
            title = None
            print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
            raw = page.raw
            if 'meta' in filters:
                raw = clean_meta(raw)
            if 'links' in filters:
                raw = delete_relative_links(raw)
            if 'attachments' in filters:
                raw = attachments(raw, page.slug)
            if 'directives' in filters:
                raw = directives(raw)
            if 'emojis' in filters:
                raw = emojis(raw)
            if 'email' in filters:
                raw = email(raw)
            if 'title_level' in filters:
                raw = title_level(raw)
            if 'code' in filters:
                # 'code' needs the external pandoc binary (see module top).
                if not pandoc:
                    print('The filter "code" need Pandoc installed in your system. Ignoring')
                else:
                    raw = code(raw)
            if 'title' in filters and not page.title:
                title = page._get_part('get_document_title')
            if raw != page.raw or title:
                if title:
                    page.title = title
                if raw != page.raw:
                    page.raw = raw
                page.save()
                # Notify listeners (e.g. git backend) about the rewrite.
                page_saved.send_robust(sender='moin',
                                       page=page,
                                       author=None,
                                       message=options['message'],
                                       form_extra_data={})
            else:
                print('Nothing changed. Ignoring update')
rrwen/google_streetview | google_streetview/helpers.py | Python | mit | 2,418 | 0.016129 | # -*- coding: utf-8 -*-
from itertools import product
import requests
import shutil
def api_list(apiargs):
    """Expand a multi-valued Street View query dict into single queries.

    Each value in *apiargs* may contain several alternatives separated by
    ``;`` (e.g. ``'heading': '0;90;180'``). The cartesian product of all
    alternatives is built, yielding one plain parameter dict per concrete
    Google Street View Image API request.

    Args:
      apiargs (dict):
        Street View URL parameters; values may hold multiple
        ``;``-separated alternatives. NOTE: the dict is emptied as a
        side effect (as in the original implementation).

    Returns:
      listof dict: one single-valued query dict per combination.
    """
    # Split every parameter into its list of alternatives, consuming apiargs.
    alternatives = {}
    for key in list(apiargs):
        alternatives[key] = apiargs[key].split(';')
        apiargs.pop(key, None)
    # Emit one query dict per element of the cartesian product.
    names = [k for k in alternatives]
    queries = []
    for combination in product(*(alternatives[k] for k in names)):
        entry = apiargs.copy()
        for name, value in zip(names, combination):
            entry[name] = value
        queries.append(entry)
    return queries
def download(url, file_path):
    """Stream *url* to *file_path*; silently does nothing on non-200 status."""
    r = requests.get(url, stream=True)
    if r.status_code == 200: # if request is successful
        with open(file_path, 'wb') as f:
            # Decode transfer-encoding (e.g. gzip) before copying raw bytes.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
|
ArchiFleKs/magnum | magnum/conf/octavia.py | Python | apache-2.0 | 1,970 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_config import cfg
from magnum.i18n import _
# Option group and options used when talking to the Octavia
# load-balancer service.  Two lines below were garbled by stray "|"
# markers in the original ('region_nam | e', 'Region in | Identity');
# restored to the standard OpenStack client-option boilerplate.
octavia_group = cfg.OptGroup(name='octavia_client',
                             title='Options for the Octavia client')
octavia_client_opts = [
    cfg.StrOpt('region_name',
               help=_('Region in Identity service catalog to use for '
                      'communication with the OpenStack service.')),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_('Type of endpoint in Identity service catalog to use '
                      'for communication with the OpenStack service.'))]
common_security_opts = [
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]
ALL_OPTS = list(itertools.chain(
    octavia_client_opts,
    common_security_opts
))
def register_opts(conf):
    """Register the [octavia_client] option group and all of its options.

    :param conf: an oslo.config ConfigOpts instance to register on.
    """
    conf.register_group(octavia_group)
    conf.register_opts(ALL_OPTS, group=octavia_group)
def list_opts():
    """Return this module's options keyed by their option group.

    Consumed by oslo.config's sample-config generation machinery.
    """
    return {octavia_group: ALL_OPTS}
|
mdwint/hype | hype/cloud/amazon.py | Python | mit | 1,420 | 0.010563 | from hype.cloud.cloudprovider import CloudProvider
from boto import ec2
class AmazonEC2(CloudProvider):
    """CloudProvider backed by Amazon EC2 via boto.

    Instances launched by start_nodes() are tracked in ``self.pending``
    until get_started_nodes() observes them in the ``running`` state,
    at which point they move to ``self.running`` (both lists are
    presumably initialised by CloudProvider — confirm in base class).
    """

    def __init__(self, cfg):
        # NOTE: original source was garbled here ("__in | it__"); restored.
        CloudProvider.__init__(self, cfg)
        self.conn = ec2.connect_to_region(
            str(self.cfg["region"]),
            aws_access_key_id=str(self.cfg["access-key-id"]),
            aws_secret_access_key=str(self.cfg["secret-access-key"]))

    def __del__(self):
        # Best effort: __del__ can run during interpreter shutdown or on a
        # partially-constructed object, so never let it raise.
        try:
            self.conn.close()
        except Exception:
            pass

    def get_started_nodes(self):
        """Poll pending instances; move the ones now running into
        ``self.running`` and return their public DNS names."""
        # The original wrote ``except boto.exception.EC2ResponseError``
        # but only ``from boto import ec2`` is imported at module level,
        # so the handler itself raised NameError.  Import the exception
        # locally instead.
        from boto.exception import EC2ResponseError
        running = []
        for instance in self.pending:
            try:
                instance.update()
            except EC2ResponseError:
                # Instance not yet known to the API; check again next poll.
                continue
        for node in [i for i in self.pending if i.state == "running"]:
            self.pending.remove(node)
            running.append(node)
        self.running.extend(running)
        return [i.public_dns_name for i in running]

    def start_nodes(self, n, bootstrap):
        """Launch *n* instances with *bootstrap* as user-data; they are
        tracked in ``self.pending`` until they come up."""
        if not n:
            return []
        reservation = self.conn.run_instances(
            self.cfg["image-id"],
            min_count=n, max_count=n, key_name=self.cfg["key-pair"],
            instance_type=self.cfg["instance-type"],
            security_groups=[self.cfg["security-group"]],
            user_data=bootstrap)
        self.pending.extend(reservation.instances)

    def stop_nodes(self, n):
        """Terminate up to *n* of the most recently started running
        nodes and return their public DNS names."""
        m = min(n, len(self.running))
        if not m:
            return []
        stopped = self.running[-m:]
        self.conn.terminate_instances([i.id for i in stopped])
        for node in stopped:
            self.running.remove(node)
        return [i.public_dns_name for i in stopped]
|
ionomy/ion | test/functional/test_framework/__init__.py | Python | mit | 465 | 0.002151 | # Copyright (c) 2018 The Bitcoin Unlimited developers
# Copyright (c) 2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Restored: this import line was garbled by stray "|" markers
# ("ionlib i | mport", "SIGHA | SH_ALL") and was a syntax error.
from test_framework.ionlib import init, bin2hex, signTxInput, randombytes, pubkey, spendscript, addrbin, txid, SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY, ScriptMachine, ScriptFlags, ScriptError, Error, ION
|
kiyukuta/chainer | chainer/functions/evaluation/r2_score.py | Python | mit | 2,473 | 0 | from chainer import cuda
from chainer import function
from chainer.utils import type_check
class R2_score(function.Function):

    """Function computing the R^2 (coefficient of determination) score.

    The score is computed per output column; ``multioutput`` chooses
    between averaging the per-column scores ('uniform_average') and
    returning them unreduced ('raw_values').
    """

    def __init__(self, sample_weight, multioutput):
        # sample_weight exists only for scikit-learn API compatibility.
        if sample_weight is not None:
            raise NotImplementedError()
        if multioutput not in ('uniform_average', 'raw_values'):
            raise ValueError("invalid multioutput argument")
        self.multioutput = multioutput

    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 2)
        pred_type, true_type = in_types
        type_check.expect(pred_type.dtype.kind == 'f',
                          true_type.dtype.kind == 'f')
        type_check.expect(pred_type.shape == true_type.shape)

    def forward(self, inputs):
        xp = cuda.get_array_module(*inputs)
        pred, true = inputs
        # Residual and total sums of squares, per output column.
        residual = xp.sum((pred - true) ** 2, axis=0)
        total = xp.sum((true - xp.mean(true, axis=0)) ** 2, axis=0)
        # Columns with zero variance get a score of 0 (matches sklearn).
        score = xp.where(total != 0, 1 - residual / total, 0.0)\
            .astype(pred.dtype)
        if self.multioutput == 'uniform_average':
            return xp.asarray(score.mean()),
        elif self.multioutput == 'raw_values':
            return score,
def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'):
    """Computes R^2 (coefficient of determination) regression score function.

    (The original ``def`` line and docstring were garbled by stray "|"
    markers; restored.)

    Args:
        pred(Variable): Variable holding a vector, matrix or tensor of
            estimated target values.
        true(Variable): Variable holding a vector, matrix or tensor of
            correct target values.
        sample_weight: This argument is for compatibility with
            scikit-learn's implementation of r2_score. Current
            implementation admits None only.
        multioutput(string): ['uniform_average', 'raw_values']. If
            'uniform_average', this function returns an average of R^2
            scores of multiple outputs. If 'raw_values', this function
            returns a set of R^2 scores of multiple outputs.

    Returns:
        Variable: A Variable holding a scalar array of the R^2 score if
        'multioutput' is 'uniform_average' or a vector of R^2 scores if
        'multioutput' is 'raw_values'.

    .. note:: This function is non-differentiable.

    """
    return R2_score(sample_weight=sample_weight,
                    multioutput=multioutput)(pred, true)
|
DarkPurpleShadow/ConnectFour | urwid/font.py | Python | bsd-3-clause | 24,937 | 0.008535 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid BigText fonts
# Copyright (C) 2004-2006 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from urwid.escape import SAFE_ASCII_DEC_SPECIAL_RE
from urwid.util import apply_target_encoding, str_util
from urwid.canvas import TextCanvas
def separate_glyphs(gdata, height):
    """Split one glyph-art string into per-character column data.

    *gdata* is a multi-line string: after dropping the first and last
    (blank) lines, the first remaining line is the "key" line naming the
    character each column belongs to, followed by *height* art lines.

    Returns ``(glyphs, utf8_required)`` where ``glyphs`` maps each key
    character to ``(width, [row strings])`` and ``utf8_required`` tells
    whether any row segment falls outside the safe ASCII/DEC-special set.

    (One statement below was garbled by a stray "|" in the original and
    has been restored.)
    """
    gl = gdata.split("\n")
    del gl[0]
    del gl[-1]
    for g in gl:
        assert "\t" not in g
    assert len(gl) == height+1, repr(gdata)
    key_line = gl[0]
    del gl[0]
    c = None  # current key character being collected
    key_index = 0  # index into character key line
    end_col = 0  # column position at end of glyph
    start_col = 0  # column position at start of glyph
    jl = [0]*height  # per-row indexes into the art lines (gl)
    dout = {}
    utf8_required = False
    while True:
        if c is None:
            if key_index >= len(key_line):
                break
            c = key_line[key_index]
        if key_index < len(key_line) and key_line[key_index] == c:
            # Still inside the run of key characters for this glyph:
            # widen the glyph by this character's display width.
            end_col += str_util.get_width(ord(c))
            key_index += 1
            continue
        # Key character changed: slice columns [start_col, end_col) out
        # of every art row for the glyph just finished.
        out = []
        for k in range(height):
            l = gl[k]
            j = jl[k]
            y = 0
            fill = 0
            while y < end_col - start_col:
                if j >= len(l):
                    # Art row is shorter than the key line; pad the
                    # remainder with spaces.
                    fill = end_col - start_col - y
                    break
                y += str_util.get_width(ord(l[j]))
                j += 1
            assert y + fill == end_col - start_col, \
                repr((y, fill, end_col))
            segment = l[jl[k]:j]
            if not SAFE_ASCII_DEC_SPECIAL_RE.match(segment):
                utf8_required = True
            out.append(segment + " " * fill)
            jl[k] = j
        start_col = end_col
        dout[c] = (y + fill, out)
        c = None
    return dout, utf8_required
_all_fonts = []
def get_all_fonts():
    """
    Return a list of (font name, font class) tuples.
    """
    # Copy the registry so callers cannot mutate it.
    return list(_all_fonts)
def add_font(name, cls):
    """Register font class *cls* under *name* in the module registry."""
    # Original line was garbled ("_all_f | onts"); restored.
    _all_fonts.append((name, cls))
class Font(object):
    """Base class for BigText fonts.

    Subclasses supply ``height`` and ``data`` (glyph-art strings); the
    constructor parses them into per-character widths and row data.
    """

    def __init__(self):
        assert self.height
        assert self.data
        self.char = {}    # char -> (width, list of row strings)
        self.canvas = {}  # render cache: char -> TextCanvas
        self.utf8_required = False
        for gdata in self.data:
            self.add_glyphs(gdata)

    def add_glyphs(self, gdata):
        """Parse one glyph-data string and merge its characters in."""
        glyphs, needs_utf8 = separate_glyphs(gdata, self.height)
        self.char.update(glyphs)
        self.utf8_required |= needs_utf8

    def characters(self):
        """Return every supported character, sorted, as one string."""
        return "".join(sorted(self.char.keys()))

    def char_width(self, c):
        """Column width of *c*, or 0 when the font lacks it."""
        entry = self.char.get(c)
        return entry[0] if entry is not None else 0

    def char_data(self, c):
        """Row strings for *c* (raises KeyError when unsupported)."""
        return self.char[c][1]

    def render(self, c):
        """Return a cached TextCanvas for *c*, building it on first use."""
        cached = self.canvas.get(c)
        if cached is not None:
            return cached
        width, rows = self.char[c]
        encoded_rows = []
        attr_rows = []
        for row in rows:
            text, attrs = apply_target_encoding(row)
            encoded_rows.append(text)
            attr_rows.append(attrs)
        canv = TextCanvas(encoded_rows, None, attr_rows, maxcol=width,
            check_width=False)
        self.canvas[c] = canv
        return canv
#safe_palette = u"┘┐┌└┼─├┤┴┬│"
#more_palette = u"═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬○"
#block_palette = u"▄#█#▀#▌#▐#▖#▗#▘#▙#▚#▛#▜#▝#▞#▟"
class Thin3x3Font(Font):
height = 3
data = ["""
000111222333444555666777888999 !
┌─┐ ┐ ┌─┐┌─┐ ┐┌─ ┌─ ┌─┐┌─┐┌─┐ │
│ │ │ ┌─┘ ─┤└─┼└─┐├─┐ ┼├─┤└─┤ │
└─┘ ┴ └─ └─┘ ┴ ─┘└─┘ ┴└─┘ ─┘ .
""", r"""
"###$$$%%%'*++,--.///:;==???[[\\\]]^__`
" ┼┼┌┼┐O /' /.. _┌─┐┌ \ ┐^ `
┼┼└┼┐ / * ┼ ─ / ., _ ┌┘│ \ │
└┼┘/ O , ./ . └ \ ┘ ──
"""]
add_font("Thin 3x3",Thin3x3Font)
class Thin4x3Font(Font):
height = 3
data = Thin3x3Font.data + ["""
0000111122223333444455556666777788889999 ####$$$$
┌──┐ ┐ ┌──┐┌──┐ ┐┌── ┌── ┌──┐┌──┐┌──┐ ┼─┼┌┼┼┐
│ │ │ ┌──┘ ─┤└──┼└──┐├──┐ ┼├──┤└──┤ ┼─┼└┼┼┐
└──┘ ┴ └── └──┘ ┴ ──┘└──┘ ┴└──┘ ──┘ └┼┼┘
"""]
add_font("Thin 4x3",Thin4x3Font)
class HalfBlock5x4Font(Font):
height = 4
data = ["""
00000111112222233333444445555566666777778888899999 !!
▄▀▀▄ ▄█ ▄▀▀▄ ▄▀▀▄ ▄ █ █▀▀▀ ▄▀▀ ▀▀▀█ ▄▀▀▄ ▄▀▀▄ █
█ █ █ ▄▀ ▄▀ █▄▄█ █▄▄ █▄▄ ▐▌ ▀▄▄▀ ▀▄▄█ █
█ █ █ ▄▀ ▄ █ █ █ █ █ █ █ █ █ ▀
▀▀ ▀▀▀ ▀▀▀▀ ▀▀ ▀ ▀▀▀ ▀▀ ▀ ▀▀ ▀▀ ▀
""", '''
"""######$$$$$$%%%%%&&&&&((()))******++++++,,,-----..////:::;;
█▐▌ █ █ ▄▀█▀▄ ▐▌▐▌ ▄▀▄ █ █ ▄ ▄ ▄ ▐▌
▀█▀█▀ ▀▄█▄ █ ▀▄▀ ▐▌ ▐▌ ▄▄█▄▄ ▄▄█▄▄ ▄▄▄▄ █ ▀ ▀
▀█▀█▀ ▄ █ █ ▐▌▄ █ ▀▄▌▐▌ ▐▌ ▄▀▄ █ ▐▌ ▀ ▄▀
▀ ▀ ▀▀▀ ▀ ▀ ▀▀ ▀ ▀ ▄▀ ▀ ▀
''', r"""
<<<<<=====>>>>>?????@@@@@@[[[[\\\\]]]]^^^^____```{{{{||}}}}~~~~''´´´
▄▀ ▀▄ ▄▀▀▄ ▄▀▀▀▄ █▀▀ ▐▌ ▀▀█ ▄▀▄ ▀▄ ▄▀ █ ▀▄ ▄ █ ▄▀
▄▀ ▀▀▀▀ ▀▄ ▄▀ █ █▀█ █ █ █ ▄▀ █ ▀▄ ▐▐▌▌
▀▄ ▀▀▀▀ ▄▀ ▀ █ ▀▀▀ █ ▐▌ █ █ █ █ ▀
▀ ▀ ▀ ▀▀▀ ▀▀▀ ▀ ▀▀▀ ▀▀▀▀ ▀ ▀ ▀
""", '''
AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGGGGGHHHHHIIJJJJJKKKKK
▄▀▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ █▀▀▀ █▀▀▀ ▄▀▀▄ █ █ █ █ █ █
█▄▄█ █▄▄▀ █ █ █ █▄▄ █▄▄ █ █▄▄█ █ █ █▄▀
█ █ █ █ █ ▄ █ █ █ █ █ ▀█ █ █ █ ▄ █ █ ▀▄
▀ ▀ ▀▀▀ ▀▀ ▀▀▀ ▀▀▀▀ ▀ ▀▀ ▀ ▀ ▀ ▀▀ ▀ ▀
''', '''
LLLLLMMMMMMNNNNNOOOOOPPPPPQQQQQRRRRRSSSSSTTTTT
█ █▄ ▄█ ██ █ ▄▀▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ ▄▀▀▄ ▀▀█▀▀
█ █ ▀ █ █▐▌█ █ █ █▄▄▀ █ █ █▄▄▀ ▀▄▄ █
█ █ █ █ ██ █ █ █ █ ▌█ █ █ ▄ █ █
▀▀▀▀ ▀ ▀ ▀ ▀ ▀▀ ▀ ▀▀▌ ▀ ▀ ▀▀ ▀
''', '''
UUUUUVVVVVVWWWWWWXXXXXXYYYYYYZZZZZ
█ █ █ █ █ █ █ █ █ █ ▀▀▀█
█ █ ▐▌ ▐▌ █ ▄ █ ▀▄▀ ▀▄▀ ▄▀
█ █ █ █ ▐▌█▐▌ ▄▀ ▀▄ █ █
▀▀ ▀ ▀ ▀ ▀ ▀ ▀ ▀▀▀▀
''', '''
aaaaabbbbbcccccdddddeeeeeffffggggghhhhhiijjjjkkkkk
█ █ ▄▀▀ █ ▄ ▄ █
▀▀▄ █▀▀▄ ▄▀▀▄ ▄▀▀█ ▄▀▀▄ ▀█▀ ▄▀▀▄ █▀▀▄ ▄ ▄ █ ▄▀
▄▀▀█ █ █ █ ▄ █ █ █▀▀ █ ▀▄▄█ █ █ █ █ █▀▄
▀▀▀ ▀▀▀ ▀▀ ▀▀▀ ▀▀ ▀ ▄▄▀ ▀ ▀ ▀ ▄▄▀ ▀ ▀
''', '''
llmmmmmmnnnnnooooopppppqqqqqrrrrssssstttt
█ █
█ █▀▄▀▄ █▀▀▄ ▄▀▀▄ █▀▀▄ ▄▀▀█ █▀▀ ▄▀▀▀ ▀█▀
█ █ █ █ █ █ █ █ █ █ █ █ █ ▀▀▄ █
▀ ▀ ▀ ▀ ▀ ▀▀ █▀▀ ▀▀█ ▀ ▀▀▀ ▀
''', '''
uuuuuvvvvvwwwwwwxxxxxxyyyyyzzzzz
█ █ █ █ █ ▄ █ ▀▄ ▄▀ █ █ ▀▀█▀
█ █ ▐▌▐▌ ▐▌█▐▌ ▄▀▄ ▀▄▄█ ▄▀
▀▀ ▀▀ ▀ ▀ ▀ ▀ ▄▄▀ ▀▀▀▀
''']
add_font("Half Block 5x4",HalfBlock5x4Font)
class HalfBlock6x5Font(Font):
height = 5
data = ["""
000000111111222222333333444444555555666666777777888888999999 ..::////
▄▀▀▀▄ ▄█ ▄▀▀▀▄ ▄▀▀▀▄ ▄ █ █▀▀▀▀ ▄▀▀▀ ▀▀▀▀█ ▄▀▀▀▄ ▄▀▀▀▄ █
█ █ █ █ █ █ █ █ █ ▐▌ █ █ █ █ ▀ ▐▌
█ █ █ ▄▀ ▀▀▄ ▀▀▀█▀ ▀▀▀▀▄ █▀▀▀▄ █ ▄▀▀▀▄ ▀▀▀█ ▄ █
█ █ █ ▄▀ ▄ █ █ █ █ █ ▐▌ █ █ █ ▐▌
▀▀▀ ▀▀▀ ▀▀▀▀▀ ▀▀▀ ▀ ▀▀▀▀ ▀▀▀ ▀ ▀▀▀ ▀▀▀ ▀ ▀
"""]
add_font("Half Block 6x5",HalfBlock6x5Font)
class HalfBlockHeavy6x5Font(Font):
height = 5
data = ["""
000000111111222222333333444444555555666666777777888888999999 ..::////
▄███▄ ▐█▌ ▄███▄ ▄███▄ █▌ █████ ▄███▄ █████ ▄███▄ ▄███▄ █▌
█▌ ▐█ ▀█▌ ▀ ▐█ ▀ ▐█ █▌ █▌ █▌ █▌ █▌ █▌ ▐█ █▌ ▐█ █▌ ▐█
█▌ ▐█ █▌ ▄█▀ ██▌ █████ ████▄ ████▄ ▐█ ▐███▌ ▀████ █▌
█▌ ▐█ |
carvalhomb/tsmells | guess/Tools/jythonc/PythonVisitor.py | Python | gpl-2.0 | 17,603 | 0.003522 | # Copyright (c) Corporation for National Research Initiatives
from org.python.parser import Visitor, SimpleNode
from org.python.parser.PythonGrammarTreeConstants import *
from org.python.parser import SimpleNode
from org.python.compiler import Future
comp_ops = {JJTLESS_CMP : 'lt',
JJTEQUAL_CMP : 'eq',
JJTGREATER_CMP : 'gt',
JJTGREATER_EQUAL_CMP: 'ge',
JJTLESS_EQUAL_CMP : 'le',
JJTNOTEQUAL_CMP : 'ne',
JJTIS_NOT_CMP : 'isnot',
JJTIS_CMP : 'is',
JJTIN_CMP : 'in',
JJTNOT_IN_CMP : 'notin',
JJTLEFTDIREDGE : 'lde',
JJTRIGHTDIREDGE : 'rde',
JJTBIDIREDGE : 'bde',
JJTLIKE : 'like'
}
def nodeToList(node, start=0):
    """Return node's children from index *start* onward as a list."""
    return [node.getChild(i) for i in range(start, node.numChildren)]
def nodeToStrings(node, start=0):
    """Return getInfo() of node's children from index *start* onward."""
    return [node.getChild(i).getInfo() for i in range(start, node.numChildren)]
def getDocString(suite):
    """Return the suite's leading string literal (its docstring), or None."""
    if suite.numChildren == 0:
        return None
    first = suite.getChild(0)
    if first.id != JJTEXPR_STMT:
        return None
    literal = first.getChild(0)
    if literal.id == JJTSTRING:
        return literal.getInfo()
    return None
class PythonVisitor(Visitor):
def __init__(self, walker):
# a SimpleCompiler
self.walker = walker
# The fast_locals arg is supplied when this method is used for
# names in local scope.
def getName(self, node, fast_locals=0):
if not node.id == JJTNAME:
return None
s = node.getInfo()
if fast_locals:
r | eturn s
if s[:2] = | = '__' and s[-2:] != '__' and self.walker.className:
s = "_%s%s" % (self.walker.className, s)
return s
def walk(self, node):
self.suite(node)
def startnode(self, node):
self.walker.setline(node.beginLine)
def suite(self, node):
return self.walker.suite(nodeToList(node))
file_input = suite
single_input = suite
eval_input = suite
def exec_stmt(self, node):
self.startnode(node)
code = node.getChild(0).visit(self)
globs = locs = None
if node.numChildren > 1:
globs = node.getChild(1).visit(self)
if node.numChildren > 2:
locs = node.getChild(2).visit(self)
return self.walker.exec_stmt(code, globs, locs)
def assert_stmt(self, node):
self.startnode(node)
test = node.getChild(0).visit(self)
if node.numChildren > 1:
message = node.getChild(1).visit(self)
else:
message = None
return self.walker.assert_stmt(test, message)
def pass_stmt(self, node):
self.startnode(node)
return self.walker.pass_stmt()
def break_stmt(self, node):
self.startnode(node)
return self.walker.break_stmt()
def continue_stmt(self, node):
self.startnode(node)
return self.walker.continue_stmt()
def return_stmt(self, node):
self.startnode(node)
if node.numChildren == 0:
return self.walker.return_stmt()
else:
return self.walker.return_stmt(node.getChild(0))
def global_stmt(self, node):
self.startnode(node)
return self.walker.global_stmt(nodeToStrings(node))
def raise_stmt(self, node):
self.startnode(node)
exc_type = exc_value = exc_traceback = None
n = node.numChildren
if n > 0:
exc_type = node.getChild(0).visit(self)
if n > 1:
exc_value = node.getChild(1).visit(self)
if n > 2:
exc_traceback = node.getChild(2).visit(self)
return self.walker.raise_stmt(exc_type, exc_value, exc_traceback)
def Import(self, node):
self.startnode(node)
names = self.import_as_name(node, 0)
return self.walker.import_stmt(names)
def ImportFrom(self, node):
Future.checkFromFuture(node) # future stmt support
self.startnode(node)
if node.numChildren > 1:
names = self.import_as_name(node, 1)
return self.walker.importfrom_stmt(node.getChild(0).visit(self),
names)
else:
return self.walker.importfrom_stmt(
node.getChild(0).visit(self), "*")
def import_as_name(self, node, startnode=0):
names = []
for i in range(startnode, node.numChildren):
n = node.getChild(i)
if n.id == JJTDOTTED_AS_NAME:
dotted = n.getChild(0).visit(self)
asname = n.getChild(1).getInfo()
elif n.id == JJTIMPORT_AS_NAME:
dotted = n.getChild(0).getInfo()
asname = n.getChild(1).getInfo()
elif n.id == JJTDOTTED_NAME:
dotted = n.visit(self)
asname = None
else:
dotted = n.getInfo()
asname = None
names.append((dotted, asname))
return names
def dotted_name(self, node):
return nodeToStrings(node)
def print_ext(self, node):
# There better be exactly one subnode
self.startnode(node)
return node.getChild(0)
def print_stmt(self, node):
self.startnode(node)
n = node.numChildren
rets = []
printext = 0
file = None
start = 0
nochildren = 0
# extended print?
if n > 0 and node.getChild(0).id == JJTPRINT_EXT:
printext = 1
file = self.print_ext(node.getChild(0))
start = 1
nochildren = 1
for i in range(start, n-1):
child = node.getChild(i)
if printext:
rets.append(self.walker.print_continued_to(file, child))
else:
rets.append(self.walker.print_continued(child))
if n == nochildren:
if printext:
rets.append(self.walker.print_line_to(file))
else:
rets.append(self.walker.print_line())
elif node.getChild(n-1).id != JJTCOMMA:
child = node.getChild(n-1)
if printext:
rets.append(self.walker.print_line_to(file, child))
else:
rets.append(self.walker.print_line(child))
return rets
def if_stmt(self, node):
self.startnode(node)
tests = []
else_body = None
n = node.numChildren
for i in range(0,n-1,2):
tests.append( (node.getChild(i), node.getChild(i+1)) )
if n % 2 == 1:
else_body = node.getChild(n-1)
return self.walker.if_stmt(tests, else_body)
def while_stmt(self, node):
self.startnode(node)
test = node.getChild(0)
suite = node.getChild(1)
if node.numChildren == 3:
else_body = node.getChild(2)
else:
else_body = None
return self.walker.while_stmt(test, suite, else_body)
def for_stmt(self, node):
self.startnode(node)
index = node.getChild(0)
sequence = node.getChild(1)
body = node.getChild(2)
if node.numChildren == 4:
else_body = node.getChild(3)
else:
else_body = None
return self.walker.for_stmt(index, sequence, body, else_body)
def expr_stmt(self, node):
if node.getNumChildren() == 1:
n = node.getChild(0)
if n.id >= JJTAUG_PLUS and n.id <= JJTAUG_POWER:
return self.walker.visit(n)
self.startnode(node)
rhs = node.getChild(node.numChildren-1)
return self.walker.expr_stmt(nodeToList(node)[:-1], rhs)
def del_stmt(self, node) |
ScreamingUdder/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py | Python | gpl-3.0 | 17,107 | 0.006079 | from __future__ import (absolute_import, division, print_function)
import os
import numpy as np
from mantid import config, mtd, logger
from mantid.kernel import StringListValidator, Direction
from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \
WorkspaceGroupProperty, FileAction, Progress
from mantid.simpleapi import * # noqa
def _ws_or_none(s):
return mtd[s] if s != '' else None
def _extract_workspace(ws, ws_out, x_start, x_end):
    """
    Extracts a part of the workspace and
    shifts the x-axis to start from 0
    @param ws :: input workspace name
    @param ws_out :: output workspace name
    @param x_start :: start bin of workspace to be extracted
    @param x_end :: end bin of workspace to be extracted
    """
    CropWorkspace(InputWorkspace=ws, OutputWorkspace=ws_out, XMin=x_start, XMax=x_end)
    # Shift x so that the cropped range begins at zero.
    ScaleX(InputWorkspace=ws_out, OutputWorkspace=ws_out, Factor=-x_start, Operation='Add')
class IndirectILLEnergyTransfer(PythonAlgorithm):
_run_file = None
_map_file = None
_parameter_file = None
_reduction_type = None
_mirror_sense = None
_doppler_energy = None
_velocity_profile = None
_in | strument_name = None
_instrument = None
_analyser = None
_reflection = None
_dead_channels = None
_ws = None
_red_ws = None
_psd_int_range = None
_use_map_file = None
_spectrum_axis = None
_efixed = None
def category(self):
return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
def summary(self):
return 'Performs initial energy transfer reduction for ILL indirect geometry data, instrument IN16B.'
def seeAlso(self):
return [ "IndirectILLReductionQENS","IndirectILLReductionFWS" ]
def name(self):
return "IndirectILLEnergyTransfer"
def PyInit(self):
self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']),
doc='File path of run (s).')
self.declareProperty(FileProperty('MapFile', '',
action=FileAction.OptionalLoad,
extensions=['map','xml']),
doc='Filename of the detector grouping map file to use. \n'
'By default all the pixels will be summed per each tube. \n'
'Use .map or .xml file (see GroupDetectors documentation) '
'only if different range is needed for each tube.')
self.declareProperty(name='ManualPSDIntegrationRange',defaultValue=[1,128],
doc='Integration range of vertical pixels in each PSD tube. \n'
'By default all the pixels will be summed per each tube. \n'
'Use this option if the same range (other than default) '
'is needed for all the tubes.')
self.declareProperty(name='Analyser',
defaultValue='silicon',
validator=StringListValidator(['silicon']),
doc='Analyser crystal.')
self.declareProperty(name='Reflection',
defaultValue='111',
validator=StringListValidator(['111', '311']),
doc='Analyser reflection.')
self.declareProperty(name='CropDeadMonitorChannels',defaultValue=False,
doc='Whether or not to exclude the first and last few channels '
'with 0 monitor count in the energy transfer formula.')
self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '',
direction=Direction.Output),
doc='Group name for the reduced workspace(s).')
self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
doc='The spectrum axis conversion target.')
def validateInputs(self):
issues = dict()
self._psd_int_range = self.getProperty('ManualPSDIntegrationRange').value
if not self.getPropertyValue('MapFile'):
if len(self._psd_int_range) != 2:
issues['ManualPSDIntegrationRange'] = 'Specify comma separated pixel range, e.g. 1,128'
elif self._psd_int_range[0] < 1 or self._psd_int_range[1] > 128 \
or self._psd_int_range[0] >= self._psd_int_range[1]:
issues['ManualPSDIntegrationRange'] = 'Start or end pixel number out is of range [1-128], or has wrong order'
return issues
def setUp(self):
self._run_file = self.getPropertyValue('Run').replace(',','+') # automatic summing
self._analyser = self.getPropertyValue('Analyser')
self._map_file = self.getPropertyValue('MapFile')
self._reflection = self.getPropertyValue('Reflection')
self._dead_channels = self.getProperty('CropDeadMonitorChannels').value
self._red_ws = self.getPropertyValue('OutputWorkspace')
self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
if self._map_file or (self._psd_int_range[0] == 1 and self._psd_int_range[1] == 128):
self._use_map_file = True
else:
self._use_map_file = False
def _load_map_file(self):
"""
Loads the detector grouping map file
@throws RuntimeError :: if neither the user defined nor the default file is found
"""
self._instrument_name = self._instrument.getName()
self._analyser = self.getPropertyValue('Analyser')
self._reflection = self.getPropertyValue('Reflection')
idf_directory = config['instrumentDefinition.directory']
ipf_name = self._instrument_name + '_' + self._analyser + '_' + self._reflection + '_Parameters.xml'
self._parameter_file = os.path.join(idf_directory, ipf_name)
self.log().information('Set parameter file : {0}'.format(self._parameter_file))
if self._use_map_file:
if self._map_file == '':
# path name for default map file
if self._instrument.hasParameter('Workflow.GroupingFile'):
grouping_filename = self._instrument.getStringParameter('Workflow.GroupingFile')[0]
self._map_file = os.path.join(config['groupingFiles.directory'], grouping_filename)
else:
raise RuntimeError("Failed to find default detector grouping file. Please specify manually.")
self.log().information('Set detector map file : {0}'.format(self._map_file))
def _mask(self, ws, xstart, xend):
"""
Masks the first and last bins
@param ws :: input workspace name
@param xstart :: MaskBins between x[0] and x[xstart]
@param xend :: MaskBins between x[xend] and x[-1]
"""
x_values = mtd[ws].readX(0)
if xstart > 0:
logger.debug('Mask bins smaller than {0}'.format(xstart))
MaskBins(InputWorkspace=ws, OutputWorkspace=ws, XMin=x_values[0], XMax=x_values[xstart])
if xend < len(x_values) - 1:
logger.debug('Mask bins larger than {0}'.format(xend))
MaskBins(InputWorkspace=ws, OutputWorkspace=ws, XMin=x_values[xend + 1], XMax=x_values[-1])
def _convert_to_energy(self, ws):
"""
Converts the x-axis from raw channel number to energy transfer
@param ws :: input workspace name
"""
x = mtd[ws].readX(0)
size = mtd[ws].blocksize()
mid = (x[-1] + x[0])/ 2.
scale = 0.001 # from micro ev to mili ev
factor = size / (size - 1)
if self._doppler_energy != 0:
formula = '(x/{0} - 1)*{1}'.format(mid, self._doppler_energy * scale * factor)
else:
# Center the data for elastic fixed windo |
Weasyl/weasyl | weasyl/controllers/moderation.py | Python | apache-2.0 | 6,202 | 0.002904 | import arrow
from pyramid.httpexceptions import HTTPSeeOther
from pyramid.response import Response
from weasyl import define, macro, moderation, note, profile, report
from weasyl.controllers.decorators import moderator_only, token_checked
from weasyl.error import WeasylError
# Moderator control panel functions
@moderator_only
def modcontrol_(request):
    """Render the moderator control panel landing page."""
    page = define.webpage(request.userid, "modcontrol/modcontrol.html",
                          title="Moderator Control Panel")
    return Response(page)
@moderator_only
def modcontrol_suspenduser_get_(request):
return Response(define.webpage(request.userid, "modcontrol/suspenduser.html",
(moderation.BAN_TEMPLATES,), title="User Suspensions"))
@moderator_only
@token_checked
def modcontrol_suspenduser_post_(request):
form = request.web_input(userid="", username="", mode="", reason="", day="", month="", year="", datetype="",
duration="", durationunit="")
moderation.setusermode(request.userid, form)
raise HTTPSeeOther(location="/modcontrol")
@moderator_only
def modcontrol_report_(request):
form = request.web_input(reportid='')
r = report.select_view(request.userid, form)
blacklisted_tags = moderation.gallery_blacklisted_tags(request.userid, r.target.userid)
return Response(define.webpage(request.userid, "modcontrol/report.html", [
request.userid,
r,
blacklisted_tags,
], title="View Reported " + r.target_type.title()))
@moderator_only
def modcontrol_reports_(request):
form = request.web_input(status="open", violation="", submitter="")
return Response(define.webpage(request.userid, "modcontrol/reports.html", [
# Method
{"status": form.status, "violation": int(form.violation or -1), "submitter": form.submitter},
# Reports
report.select_list(request.userid, form),
macro.MACRO_REPORT_VIOLATION,
], title="Reported Content"))
@moderator_only
@token_checked
def modcontrol_closereport_(request):
form = request.web_input(reportid='', action='')
report.close(request.userid, form)
raise HTTPSeeOther(location="/modcontrol/report?reportid=%d" % (int(form.reportid),))
@moderator_only
def modcontrol_contentbyuser_(request):
    """Show all of a user's submissions/characters/journals (per the
    selected ``features`` flags: 's', 'c', 'j'), newest first.

    (Two lines were garbled by stray "|" markers in the original;
    restored.)
    """
    form = request.web_input(name='', features=[])
    # Does the target user exist? There's no sense in displaying a blank page if not.
    target_userid = profile.resolve(None, None, form.name)
    if not target_userid:
        raise WeasylError("userRecordMissing")
    submissions = moderation.submissionsbyuser(target_userid) if 's' in form.features else []
    characters = moderation.charactersbyuser(target_userid) if 'c' in form.features else []
    journals = moderation.journalsbyuser(target_userid) if 'j' in form.features else []
    return Response(define.webpage(request.userid, "modcontrol/contentbyuser.html", [
        form.name,
        sorted(submissions + characters + journals, key=lambda item: item['unixtime'], reverse=True),
    ], title=form.name + "'s Content"))
@moderator_only
@token_checked
def modcontrol_massaction_(request):
    """Apply a moderation action to the checked submissions, characters,
    and journals.

    ``zap-*`` actions (cover/thumb/both) are special-cased: they act on
    a single submission only and redirect to that submission's page.
    Every other action goes through moderation.bulk_edit, whose report
    is returned as plain text.
    """
    form = request.web_input(action='', name='', submissions=[], characters=[], journals=[])
    if form.action.startswith("zap-"):
        # "Zapping" cover art or thumbnails is not a bulk edit.
        if not form.submissions:
            raise WeasylError("Unexpected")
        submitid = int(form.submissions[0])
        # NOTE(review): "type" shadows the builtin; renaming would be a
        # code change, so it is only flagged here.
        type = form.action.split("zap-")[1]
        if type == "cover":
            moderation.removecoverart(request.userid, submitid)
        elif type == "thumb":
            moderation.removethumbnail(request.userid, submitid)
        elif type == "both":
            moderation.removecoverart(request.userid, submitid)
            moderation.removethumbnail(request.userid, submitid)
        else:
            # Unknown zap- variant.
            raise WeasylError("Unexpected")
        raise HTTPSeeOther(location="/submission/%i" % (submitid,))
    return Response(
        content_type='text/plain',
        body=moderation.bulk_edit(
            request.userid,
            form.action,
            list(map(int, form.submissions)),
            list(map(int, form.characters)),
            list(map(int, form.journals)),
        ),
    )
@moderator_only
def modcontrol_manageuser_(request):
form = request.web_input(name="")
return Response(define.webpage(request.userid, "modcontrol/manageuser.html", [
moderation.manageuser(request.userid, form),
], title="User Management"))
@moderator_only
@token_checked
def modcontrol_removeavatar_(request):
    """Strip the given user's avatar, then bounce back to the panel."""
    form = request.web_input(userid="")
    target = define.get_int(form.userid)
    moderation.removeavatar(request.userid, target)
    raise HTTPSeeOther(location="/modcontrol")
@moderator_only
@token_checked
def modcontrol_removebanner_(request):
form = request.web_input(userid="")
moderation.removebanner(request.userid, define.get_int(form.userid))
raise HTTPSeeOther(location="/modcontrol")
@moderator_only
@token_checked
def modcontrol_editprofiletext_(request):
form = request.web_input(userid="", content="")
moderation.editprofiletext(request.userid, define.get_int(form.userid), form.content)
raise HTTPSeeOther(location="/modcontrol")
@moderator_only
@token_checked
def modcontrol_editcatchphrase_(request):
form = request.web_input(userid="", content="")
moderation.editcatchphrase(request.userid, define.get_int(form.userid), form.content)
raise HTTPSeeOther(location="/modcontrol")
@moderator_only
@token_checked
def modcontrol_copynotetostaffnotes_post_(request):
form = request.web_input(noteid=None)
notedata = note.select_view(request.userid, int(form.noteid))
staff_note_title = u"Received note from {sender}, dated {date}, with subject: “{subj}”.".format(
sender=notedata['sendername'],
date=arrow.get(notedata['unixtime']).format('YYYY-MM-DD HH:mm:ss ZZ'),
subj=notedata['title'],
)
moderation.note_about(
userid=request.userid,
target_user=notedata['senderid'],
title=staff_note_title,
message=notedata['content'],
)
raise HTTPSeeOther("/staffnotes/" + notedata['sendername'])
|
atodorov/anaconda | pyanaconda/payload/dnf/payload.py | Python | gpl-2.0 | 71,993 | 0.001389 | # DNF/rpm software payload management.
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import configparser
import functools
import multiprocessing
import os
import shutil
import sys
import threading
import dnf
import dnf.logging
import dnf.exceptions
import dnf.module
import dnf.module.module_base
import dnf.repo
import dnf.subject
import libdnf.conf
import libdnf.repo
import rpm
import pyanaconda.localization
from blivet.size import Size
from dnf.const import GROUP_PACKAGE_TYPES
from fnmatch import fnmatch
from glob import glob
from pykickstart.constants import GROUP_ALL, GROUP_DEFAULT, KS_MISSING_IGNORE, KS_BROKEN_IGNORE
from pyanaconda import errors as errors
from pyanaconda import isys
from pyanaconda.anaconda_loggers import get_dnf_logger, get_packaging_logger
from pyanaconda.core import constants, util
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.constants import INSTALL_TREE, ISO_DIR, DRACUT_REPODIR, DRACUT_ISODIR
from pyanaconda.core.i18n import N_, _
from pyanaconda.core.util import ProxyString, ProxyStringError, decode_bytes
from pyanaconda.flags import flags
from pyanaconda.kickstart import RepoData
from pyanaconda.modules.common.constants.objects import DEVICE_TREE
from pyanaconda.modules.common.constants.services import LOCALIZATION, STORAGE
from pyanaconda.modules.payloads.source.utils import is_valid_install_disk
from pyanaconda.payload import utils as payload_utils
from pyanaconda.payload.base import Payload
from pyanaconda.payload.dnf.utils import DNF_CACHE_DIR, DNF_PLUGINCONF_DIR, REPO_DIRS, \
DNF_LIBREPO_LOG, DNF_PACKAGE_CACHE_DIR_SUFFIX, BONUS_SIZE_ON_FILE, YUM_REPOS_DIR, \
go | _to_failure_limbo, do_transaction, get_df_map, pick_mount_point
from pyanaconda.payload. | dnf.download_progress import DownloadProgress
from pyanaconda.payload.dnf.repomd import RepoMDMetaHash
from pyanaconda.payload.errors import MetadataError, PayloadError, NoSuchGroup, DependencyError, \
PayloadInstallError, PayloadSetupError
from pyanaconda.payload.image import findFirstIsoImage, mountImage, verify_valid_installtree, \
find_optical_install_media
from pyanaconda.progress import progressQ, progress_message
from pyanaconda.simpleconfig import SimpleConfigFile
# Module-level logger for payload/packaging messages.
log = get_packaging_logger()
# Public API of this module.
__all__ = ["DNFPayload"]
class DNFPayload(Payload):
    def __init__(self, *args, **kwargs):
        """Initialize payload state and build the initial dnf configuration."""
        super().__init__(*args, **kwargs)
        self.install_device = None
        # rpm macro definitions collected for this payload (populated elsewhere)
        self._rpm_macros = []

        # Used to determine which add-ons to display for each environment.
        # The dictionary keys are environment IDs. The dictionary values are two-tuples
        # consisting of lists of add-on group IDs. The first list is the add-ons specific
        # to the environment, and the second list is the other add-ons possible for the
        # environment.
        self._environment_addons = {}

        self._base = None
        self._download_location = None
        self._updates_enabled = True
        # Builds the dnf Base object; also re-invoked by unsetup(), so it
        # must stay safe to run more than once.
        self._configure()

        # Protect access to _base.repos to ensure that the dictionary is not
        # modified while another thread is attempting to iterate over it. The
        # lock only needs to be held during operations that change the number
        # of repos or that iterate over the repos.
        self._repos_lock = threading.RLock()

        # save repomd metadata
        self._repoMD_list = []

        # Groups/packages requested by the kickstart/requirements machinery.
        self._req_groups = set()
        self._req_packages = set()
        self.requirements.set_apply_callback(self._apply_requirements)
    def unsetup(self):
        """Tear down payload state so setup can be performed again.

        Drops the dnf Base object, rebuilds a fresh configuration via
        ``_configure()`` and forgets any collected repo metadata hashes.
        """
        super().unsetup()
        self._base = None
        self._configure()
        self._repoMD_list = []
def _replace_vars(self, url):
"""Replace url variables with their values.
:param url: url string to do replacement on
:type url: string
:returns: string with variables substituted
:rtype: string or None
Currently supports $releasever and $basearch.
"""
if url:
return libdnf.conf.ConfigParser.substitute(url, self._base.conf.substitutions)
return None
    def _add_repo(self, ksrepo):
        """Add a repo to the dnf repo object.

        :param ksrepo: Kickstart Repository to add
        :type ksrepo: Kickstart RepoData object.
        :returns: None
        """
        repo = dnf.repo.Repo(ksrepo.name, self._base.conf)
        # Substitute $releasever/$basearch in all three source definitions.
        url = self._replace_vars(ksrepo.baseurl)
        mirrorlist = self._replace_vars(ksrepo.mirrorlist)
        metalink = self._replace_vars(ksrepo.metalink)

        # NFS repos are mounted locally first and exposed as file:// urls
        # ("nfs://" prefix is 6 characters long).
        if url and url.startswith("nfs://"):
            (server, path) = url[6:].split(":", 1)
            # DNF is dynamically creating properties which seems confusing for Pylint here
            # pylint: disable=no-member
            mountpoint = "%s/%s.nfs" % (constants.MOUNT_DIR, repo.name)
            self._setup_NFS(mountpoint, server, path, None)
            url = "file://" + mountpoint

        if url:
            repo.baseurl = [url]
        if mirrorlist:
            repo.mirrorlist = mirrorlist
        if metalink:
            repo.metalink = metalink
        # SSL verification is on only if the repo does not opt out AND the
        # global configuration allows it.
        repo.sslverify = not ksrepo.noverifyssl and conf.payload.verify_ssl
        if ksrepo.proxy:
            try:
                repo.proxy = ProxyString(ksrepo.proxy).url
            except ProxyStringError as e:
                # A bad proxy is logged but does not abort adding the repo.
                log.error("Failed to parse proxy for _add_repo %s: %s",
                          ksrepo.proxy, e)
        # Optional per-repo tuning/filters copied verbatim from kickstart.
        if ksrepo.cost:
            repo.cost = ksrepo.cost
        if ksrepo.includepkgs:
            repo.include = ksrepo.includepkgs
        if ksrepo.excludepkgs:
            repo.exclude = ksrepo.excludepkgs
        if ksrepo.sslcacert:
            repo.sslcacert = ksrepo.sslcacert
        if ksrepo.sslclientcert:
            repo.sslclientcert = ksrepo.sslclientcert
        if ksrepo.sslclientkey:
            repo.sslclientkey = ksrepo.sslclientkey

        # If this repo is already known, it's one of two things:
        # (1) The user is trying to do "repo --name=updates" in a kickstart file
        #     and we should just know to enable the already existing on-disk
        #     repo config.
        # (2) It's a duplicate, and we need to delete the existing definition
        #     and use this new one. The highest profile user of this is livecd
        #     kickstarts.
        if repo.id in self._base.repos:
            if not url and not mirrorlist and not metalink:
                self._base.repos[repo.id].enable()
            else:
                with self._repos_lock:
                    self._base.repos.pop(repo.id)
                    self._base.repos.add(repo)
        # If the repo's not already known, we've got to add it.
        else:
            with self._repos_lock:
                self._base.repos.add(repo)

        # Repos are enabled by default; honor the kickstart --disabled flag.
        if not ksrepo.enabled:
            self.disable_repo(repo.id)

        log.info("added repo: '%s' - %s", ksrepo.name, url or mirrorlist or metalink)
def _fetch_md(self, repo_name):
"""Download repo metadata
:param repo_name: name/id of repo to fetch
:type repo_name: str
:returns: None
"""
repo = self._base.repos[repo_name]
repo.enable()
try:
# Load |
Zhong-Lab-UCSD/Genomic-Interactive-Visualization-Engine | includes/constants_template.py | Python | apache-2.0 | 175 | 0.028571 | _CPB_EDIT_HOST='CPB_EDIT_HOST'
# NOTE(review): the quoted UPPER_CASE values look like build-time template
# placeholders to be substituted with real credentials/URIs — confirm
# against the deployment scripts before relying on them.
_CPB_EDIT_USER='CPB_EDIT_USE | R'
_CPB_EDIT_PASS='CPB_EDIT_PASS'
# Base URI and path for NCBI gene-info downloads.
_NCBI_URI='https://ftp.ncbi. | nih.gov'
_NCBI_PATH='/gene/DATA/GENE_INFO/Mammalia/'
|
erudit/eruditorg | eruditorg/apps/public/journal/viewmixins.py | Python | gpl-3.0 | 15,146 | 0.002905 | import json
import string
import structlog
from django.db.models import Q
from django.urls import reverse
from django.http import Http404
from django.http.response import HttpResponseRedirect
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from ipware import get_client_ip
from prometheus_client import Counter
from typing import List, Optional
from erudit.models import Article
from erudit.models import Issue
from erudit.models import Journal
from erudit.models import JournalInformation
from erudit.solr.models import (
SolrData,
get_solr_data,
)
from .article_access_log import (
ArticleAccessLog,
ArticleAccessType,
)
# Prometheus counter tracking views of embargoed articles, labelled by the
# type of subscription that granted access.
embargoed_article_views_by_subscription_type = Counter(
    "eruditorg_embargoed_article_views_by_subscription_type",
    _("Nombre de consultations d'articles sous embargo par type d'abonnement"),
    ["subscription_type"],
)

# Structured logger bound to this module's name.
logger = structlog.get_logger(__name__)
class SingleJournalMixin:
    """ Simply allows retrieving a Journal instance using its code or localidentifier. """

    def get_journal_queryset(self):
        """Base queryset with the neighbouring journals pre-fetched."""
        return Journal.internal_objects.select_related("previous_journal", "next_journal")

    def get_journal(self):
        """Look the journal up by code or localidentifier, else 404."""
        lookup = Q(code=self.kwargs["code"]) | Q(localidentifier=self.kwargs["code"])
        try:
            return self.get_journal_queryset().get(lookup)
        except Journal.DoesNotExist:
            raise Http404

    def get_object(self, queryset=None):
        return self.get_journal()

    @cached_property
    def journal(self):
        # Cached so repeated accesses hit the database only once.
        return self.get_journal()
class ContentAccessCheckMixin:
    """ Defines a way to check whether the current user can browse a given Érudit content. """

    def get_content(self):
        """Returns the considered content.

        By default the method will try to fetch the content using the ``object`` attribute. If this
        attribute is not available the
        :meth:`get_object<django:django.views.generic.detail.SingleObjectMixin.get_object>` method
        will be used. But subclasses can override this to control the way the content is retrieved.
        """
        if hasattr(self, "object") and self.object is not None:
            return self.object
        return self.get_object()

    def _get_subscriptions_kwargs_for_content(self):
        # Map the content object to the keyword argument understood by the
        # subscription machinery (article / issue / journal).
        content = self.get_content()
        kwargs = {}
        if isinstance(content, Article):
            # 1- Is the article in open access? Is the article subject to a movable limitation?
            kwargs["article"] = content
        elif isinstance(content, Issue):
            kwargs["issue"] = content
        elif isinstance(content, Journal):
            kwargs["journal"] = content
        return kwargs

    def dispatch(self, *args, **kwargs):
        response = super().dispatch(*args, **kwargs)
        # Record which subscription granted (or would grant) access; the
        # request may lack "subscriptions" outside the full middleware stack.
        if hasattr(self, "request") and hasattr(self.request, "subscriptions"):
            self.request.subscriptions.set_active_subscription_for(
                **self._get_subscriptions_kwargs_for_content()
            )
        return response

    def get_context_data(self, **kwargs):
        """ Inserts a flag indicating if the content can be accessed in the context. """
        context = super(ContentAccessCheckMixin, self).get_context_data(**kwargs)
        context["content_access_granted"] = self.content_access_granted
        active_subscription = self.request.subscriptions.active_subscription
        if active_subscription:
            context["subscription_type"] = active_subscription.get_subscription_type()
        return context

    @cached_property
    def content_access_granted(self):
        """Returns a boolean indicating if the content can be accessed.

        The following verifications are performed in order to determine if a given content
        can be browsed:

        1- it is in open access or not embargoed
        2- a valid prepublication ticket is provided
        3- the current user has access to it with its individual account
        4- the current IP address is inside the IP address ranges allowed to access to it
        """
        content = self.get_content()
        # Normalize to the issue that governs access (None for journals).
        if isinstance(content, Article):
            issue = content.issue
        elif isinstance(content, Issue):
            issue = content
        else:
            issue = None
 | if issu | e:
            # If the issue is in open access or if it's not embargoed, the access should always be
            # granted.
            if issue.journal.open_access or not issue.embargoed:
                return True
            # If the issue is not published, the access should only be granted if a valid
            # prepublication ticket is provided.
            if not issue.is_published:
                return issue.is_prepublication_ticket_valid(self.request.GET.get("ticket"))
        # Otherwise, check if the user has a valid subscription that provides access to the article.
        kwargs = self._get_subscriptions_kwargs_for_content()
        return self.request.subscriptions.provides_access_to(**kwargs)
class SolrDataMixin:
    """Mixin exposing the shared Solr data-access object as a property."""

    @property
    def solr_data(self) -> SolrData:
        # Resolved on every access via the module-level accessor.
        return get_solr_data()
class SingleArticleMixin(SolrDataMixin):
    """Resolve an :class:`Article` from the URL kwargs, querying Solr when
    only a bare localidentifier is available."""

    def __init__(self, *args, **kwargs):
        # Bug fix: forward to super() so cooperative __init__ chains in the
        # MRO (other mixins / Django class-based views) keep working; the
        # original implementation silently terminated the chain.
        super().__init__(*args, **kwargs)
        # TODO: make this a private instance variable
        # if this is only used for caching, it should not be accessible directly.
        self.object = None

    def get_object(self, queryset=None) -> Article:
        """Return (and cache) the requested article; raise Http404 otherwise."""
        # We support two IDing scheme here: full PID or localidentifier-only. If we have the full
        # PID, great! that saves us a request to Solr. If not, it's alright too, we just need to
        # fetch the full PID from Solr first.
        if self.object is not None:
            return self.object
        journal_code = self.kwargs.get("journal_code")
        issue_localid = self.kwargs.get("issue_localid")
        localidentifier = self.kwargs.get("localid")
        if not (journal_code and issue_localid):
            fedora_ids = self.solr_data.get_fedora_ids(localidentifier)
            if fedora_ids is None:
                raise Http404()
            journal_code, issue_localid, localidentifier = fedora_ids
        try:
            self.object = Article.from_fedora_ids(journal_code, issue_localid, localidentifier)
            return self.object
        except Article.DoesNotExist:
            raise Http404()
class SingleArticleWithScholarMetadataMixin(SingleArticleMixin):
    """ Add Google Scholar Metadata to the context """

    def get_context_data(self, **kwargs):
        """Inject the citation_* entries Google Scholar crawls."""
        context = super().get_context_data(**kwargs)
        article = self.get_object()
        erudit_object = article.erudit_object
        context.update({
            "citation_title_metadata": article.title,
            "citation_journal_title_metadata": erudit_object.get_formatted_journal_title(),
            "citation_references": erudit_object.get_references(html=False),
        })
        return context
class PrepublicationTokenRequiredMixin:
    """Deny access to unpublished content unless a valid prepublication
    ticket accompanies the request."""

    def get(self, request, *args, **kwargs):
        target = self.get_object()
        # Resolve the issue that governs publication status.
        if isinstance(target, Article):
            issue = target.issue
        elif isinstance(target, Issue):
            issue = target
        else:
            raise ValueError("This mixin should only be used with Article and Issue objects")

        # Published content, or unpublished content with a valid ticket,
        # goes through; everything else bounces back to the journal page.
        if issue.is_published or issue.is_prepublication_ticket_valid(
            request.GET.get("ticket")
        ):
            return super().get(request, *args, **kwargs)
        return HttpResponseRedirect(
            reverse("public:journal:journal_detail", args=(issue.journal.code,))
        )
class ArticleViewMetricCaptureMixin:
tracking_article_view_granted_metric_name = "erudit__journal__article_view"
tracking_view_type = "html"
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
subscription = request.subscriptions.active_subscription
if response.status_code == 200 and subscription is not None and |
andreimaximov/algorithms | codeforces/mister-b-and-flight-to-the-moon/main.py | Python | mit | 2,960 | 0.000338 | #!/usr/bin/env python3
from collections import defaultdict
# When True, run the self-test suite (test()) before solving.
DEBUG = False
def main():
    """Read n from stdin and print a cycle cover of K_n using each edge twice."""
    if DEBUG:
        test()
    vertex_count = int(input())
    cover = cycles(vertex_count)
    # Output format: number of cycles, then "<length> <v1> <v2> ..." per cycle.
    print(len(cover))
    for cycle in cover:
        body = ' '.join(str(v) for v in cycle)
        print('%d %s' % (len(cycle), body))
def cycles(n):
    """Builds a set of cycles for a fully connected graph with n vertices.

    Dispatches on parity: the even and odd constructions differ.
    """
    if n % 2 == 0:
    | return even(n)
    else:
        return odd(n)
def even(n):
    """Builds a set of cycles that a graph with even vertices.

    :param n: number of vertices (must be even, n >= 4)
    :returns: list of cycles (vertex lists) using each edge exactly twice
    """
    assert n % 2 == 0
    # Base case for complete graph such that V = {1, 2, 3, 4}.
    cycles = [[1, 2, 3], [2, 3, 4], [3, 4, 1], [4, 1, 2]]

    # Grow two vertices at a time, reusing the odd(...) pairing idea.
    for i in range(6, n + 1, 2):
        a, b = i, i - 1

        # Use edges (a, 1), (a, 0), (b, 1), (b, 0), (a, b) exactly twice each.
        cycle | s += [[a, 1, b], [a, 2, b], [a, 1, b, 2]]

        # Similar to odd(...) as we are left with 2n - 2 edges to use
        # connected to i - 4 of the vertices V' = {3 ... i - 2}. Notice that
        # |V'| is even so we can apply the same strategy as in odd(...).
        for k in range(3, i - 1, 2):
            c, d = k, k + 1
            cycles += [[a, c, b, d]] * 2

    return cycles
def odd(n):
    """Builds a set of cycles that a graph with odd vertices."""
    assert n % 2 == 1
    # Complete graph on V = {1, 2, 3}: the single triangle, used twice.
    paths = [[1, 2, 3]] * 2

    # Grow the solution two vertices at a time.
    for size in range(5, n + 1, 2):
        a, b = size, size - 1
        # The two new vertices {a, b} bring 2n + 1 new edges. Spend three
        # of them on the triangle a -> b -> 1 -> a, walked twice.
        paths += [[a, b, 1]] * 2
        # The remaining 2n - 2 edges touch the even-sized vertex set
        # V' = {2 ... size - 2}; cover them with doubled 4-cycles
        # a -> c -> b -> (c + 1) -> a over consecutive pairs.
        for c in range(2, size - 1, 2):
            paths += [[a, c, b, c + 1]] * 2

    return paths
def test():
    """Checks the cycles(...) solver for a bunch of inputs."""
    print('Testing...')
    # Spot-check a spread of sizes (both parities) rather than every n.
    for n in range(3, 300, 21):
        check(n, cycles(n))
    print('Tests pass!')
def check(n, paths):
    """Checks the solution for errors."""
    # Every vertex 1..n must appear somewhere in the cover.
    seen = set()
    for path in paths:
        seen.update(path)
    assert seen == set(range(1, n + 1))

    # Tally every undirected edge while validating cycle shape:
    # lengths 3 or 4 only, no repeated vertex inside a cycle.
    usage = defaultdict(int)
    for path in paths:
        assert len(path) in (3, 4)
        assert len(set(path)) == len(path)
        # Pair each vertex with its successor, wrapping around at the end.
        for u, v in zip(path, path[1:] + path[:1]):
            usage[tuple(sorted((u, v)))] += 1

    # Each edge of the complete graph must be used exactly twice.
    for i in range(1, n + 1):
        for j in range(i + 1, n + 1):
            assert usage[(i, j)] == 2
# Script entry point: solve one instance read from stdin.
if __name__ == '__main__':
    main()
|
Smarties89/Jockle | jockle.py | Python | mit | 8,737 | 0.00412 | #!/bin/python
# coding: utf-8
# Standard libraries
import logging
from sys import argv
from urlparse import urljoin
from zipfile import ZipFile
from flask import render_template, Flask, request, redirect, Response, make_response
from fest.decorators import requireformdata
import requests
from statuscodes import statuscodes
from mimes import mimes
from dbs import RouteDatabaseJSON
import exportplugins
# Module-wide logger and the Flask application object.
log = logging.getLogger("jockle")
app = Flask(__name__)
def debug():
    """Return True when the app should run with verbose (debug) logging.

    NOTE(review): currently hard-coded to True; the TODO below is to
    implement a real check (e.g. an environment variable).
    """
    # TODO: implement ("implementer" in the original comment)
    return True
# Configure logging once at import time; only the level depends on debug().
if debug():
    logging.basicConfig()
    log.setLevel(logging.DEBUG)
else:
    logging.basicConfig()
    log.setLevel(logging.INFO)
#################################################
########### API for controlling jockle. ########
#################################################
def addapi(api):
    """Register a mocked endpoint described by the *api* dict on the app."""
    url = api['url']
    log.info("Adding '{}' as api".format(url))

    def view():
        # Serve the canned payload with the configured mime type.
        return Response(api['returndata'], mimetype=api['type'])

    try:
        # The route name doubles as the path so every rule stays unique.
        app.add_url_rule(url, url, view, methods=[api['method']])
    except Exception as e:
        log.warning("{} could not be added. Properly because it was a malformed url or already exists. Exception: {}".
            format(url, e))
@app.route("/insertjockle", methods=["POST"])
@requireformdata(["url", "method", "type", "returndata", "returncode"])
def insertjockle(url, method, type, returndata, returncode):
    """Create a new mocked route from the posted form fields.

    Parameter names mirror the form field names required by the decorator
    ("type" intentionally shadows the builtin).
    """
    # New routes default to input type "JSON" with no input variables.
    db.insertroute(url, method, type, returndata, returncode, "JSON", "")
    return redirect("/jockle")
@app.route("/updatejockle", methods=["POST"])
@requireformdata(["id", "url", "method", "type", "returndata", "returncode", "inputtype", "inputvars"])
def updatejockle(id, url, method, type, returndata, returncode, inputtype, inputvars):
    """Update an existing mocked route identified by *id*.

    Parameter names mirror the posted form fields ("id" and "type"
    intentionally shadow builtins).
    """
    # returncode arrives as a form string; persist it as an int.
    db.update(
        id,
        url,
        method,
        type,
        returndata,
        int(returncode),
        inputtype,
        inputvars)
    return redirect("/jockle")
@app.route("/updatejockleproxy", methods=["POST"])
@requireformdata(["proxyurl"])
def updateproxy(proxyurl):
    """Store the base url that unmatched requests are proxied to."""
    db.setproxyurl(proxyurl)
    return redirect("/jockle")
@app.route("/exportjockle")
def useexportplugin():
    """Export every stored route with the selected plugin as a zip download.

    Reads the plugin index from the ``pluginnr`` query parameter, feeds all
    stored routes to the plugin, writes the generated files into
    ``static/exported.zip`` and serves that archive.
    """
    pluginnr = int(request.args['pluginnr'])
    exporter = exportplugins.plugins[pluginnr]("some name")
    for api in db.listpaths():
        exporter.addapi(api)
    # Context manager guarantees the archive is finalized and closed even
    # if a plugin raises while rendering its files (the original leaked
    # the handle on error and could leave a truncated zip behind).
    with ZipFile("static/exported.zip", "w") as archive:
        for f in exporter.export():
            # Each plugin "file" is a dict with a name and raw data.
            archive.writestr(f['filename'], f['data'])
    return app.send_static_file("exported.zip")
@app.route("/jockle")
def index():
    """Render the admin UI listing all mocked apis and settings."""
    apis = db.listpaths()
    return render_template(
        "index.html",
        apis=apis,
        mimes=mimes,
        statuscodes=statuscodes,
        proxyurl=db.proxyurl(),
        exportplugins=exportplugins.plugins
    ), 200
@app.route("/jockledelete")
def jockledelete():
    """Remove the mocked route named by the ``url`` query parameter."""
    target = request.args.get("url")
    log.info("Deleting {}".format(target))
    db.delete(target)
    return redirect("/jockle")
################################################
########### API for proxying calls. ############
################################################
# This is handling the proxy functionality.
@app.errorhandler(404)
def not_found(error=None):
    """Fallback handler: proxy any unknown path to the configured server."""
    try:
        # Graft the requested path onto the configured proxy base url.
        url = urljoin(db.proxyurl(), request.path) # .encode("ASCII")
        log.info("Preparing proxy call to {}".format(url))
        exres = externalcall(url)
        log.debug("Creating the response")
        resp = create_response(exres)
        log.debug("Response created. Now returning it")
        return resp # exres.content # resp
    except Exception as e:
        # Anything at all going wrong is reported as a plain 404 page.
        return "404 or could not reach proxy server. Exception: {}".format(e), 404
def create_response(exres):
    """Translate a python-requests response *exres* into a Flask response.

    Headers are copied selectively: headers that no longer describe the
    (already decoded, un-chunked) body are skipped, and the mimetype is
    set through Flask's dedicated attribute instead of a raw header.
    """
    resp = make_response(exres.content)
    log.debug("create_response: Adding headers to response from exres")
    for i in exres.headers:
        # It might be chunked, and we send it back in one piece.
        if i.lower() == "transfer-encoding":
            log.debug("create_response: \tNot added transfer-encoding: '{}' from resp header".
                format(exres.headers[i]))
            continue
        # This is appended on afterwards. Python-Flask requires it to
        # be added as 'resp.mimetype = xxx' instead of as a header
        if i.lower() == "content-type":
            log.debug("create_response: \tNot added content-type: '{}' from resp header".
                format(exres.headers[i]))
            continue
        # TODO: Set referer
        # This is because we might get gzip, but python-request will
        # automatically unzip it and what we respond to the client
        # will not be zipped.
        if i.lower() == "content-encoding":
            log.debug("create_response: \tNot added content-encoding.")
            continue
        # We get a cookie
        # if i.lower() == "cookie":
        #    resp.headers['Set-Cookies'] = exres.headers[i]
        #    continue
        resp.headers[i] = exres.headers[i]
        log.debug("create_response: \t- '{}' = '{}'".format(i, exres.headers[i]))
    log.debug("create_response: Setting status_code to {}".format(exres.status_code))
    resp.status_code = exres.status_code
    # Setting the mimetype. Python flask sets the charset automatically
    if 'content-type' in exres.headers:
        log.debug("create_response: Setting mimetype to {}".
            format(exres.headers['content-type'].split(";")[0]))
        resp.mimetype = exres.headers['content-type'].split(";")[0]
    # Allow cross-origin use of the proxied endpoints.
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
def gethostforproxy():
    """
    Takes the proxy url and strips it so it will work as "host"
    in the request header sent to the external service.
    """
    host = db.proxyurl()
    # Bug fix: str.lstrip() removes *characters from a set*, not a prefix,
    # so e.g. "http://somehost".lstrip("http://") would also eat the
    # leading "s" of the host name. Slice the scheme off instead.
    if host.startswith("https://"):
        host = host[len("https://"):]
    elif host.startswith("http://"):
        host = host[len("http://"):]
    # Drop any trailing slash(es) so the value is a bare host[:port].
    if host.endswith("/"):
        host = host.rstrip("/")
    return host
def externalcall(url):
    """Forward the current Flask request to *url* and return the response.

    Copies the incoming headers (with ``Host`` rewritten to the proxy
    target), replays the request body and method, and performs the call
    without following redirects so the client sees them verbatim.
    """
    # The incoming headers live in a werkzeug datastructure; copy them
    # into a plain dict so python-requests can consume them.
    headers = {}
    # Bug fix: the original read ``request.header``, which does not exist
    # on a Flask request object — the attribute is ``headers``.
    for name in request.headers.keys():
        headers[name] = request.headers[name]
    # We overwrite Host, so proxying will work more invisible.
    headers['Host'] = gethostforproxy()
    log.debug("externalcall: Original host: '{}' now host:'{}'".format(db.proxyurl(), headers['Host']))
    # python-requests computes Content-Length itself; drop it if present
    # (requests without a body may not carry the header at all, so a plain
    # ``del`` would raise KeyError).
    headers.pop("Content-Length", None)
    log.debug("externalcall: headers for ")
    for h in headers:
        log.debug("externalcall: \t- '{}':'{}'".format(h, headers[h]))
    log.info("externalcall: requesting {} [{}] data: '{}'".format(url, request.method, request.environ['body_copy']))
    exres = requests.request(
        request.method,
        url,
        data=request.environ['body_copy'],
        allow_redirects=False,
        headers=headers)
    return exres
# Thanks to jhasi at stackoverf | low
# http://stackoverflow.com/questions/10999990/get-raw-post-body-in-python-flask-regardless-of-content-type-header
class WSGICopyBody(object):
def __init__(self, app | lication):
self.application = application
def __call__(self, environ, start_response):
from cStringIO import StringIO
length = environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = environ['wsgi.input'].read(length)
environ['body_copy'] = body
environ['wsgi.input'] = StringIO(body)
# Call the wrapped application
app_iter = self.application(environ,
self._sr_callback(start_response))
# Return modified response
return app_iter
def _sr_callback(self, start_response):
def callback(status, headers, exc_info=None):
# Call upstream start_response
start_response(status, headers, exc_info)
re |
thedanotto/google-maps-urlerator | manage.py | Python | mit | 264 | 0.003788 | #!/usr/bin/ | env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings before loading the management
    # machinery (the import is deferred so the env var is set first).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "google_maps_urlerator.setti | ngs")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
ftrautsch/testEvolution | parser/resultparser.py | Python | apache-2.0 | 2,937 | 0.012938 | '''
Created on 03.02.2016
@author: fabian
'''
import os
import collections
import csv
class ResultParser(object):
'''
classdocs
'''
    def __init__(self, resultFolder):
        '''
        Constructor.

        :param resultFolder: path of the directory holding one
            sub-directory per analysed commit (named "<number>-<hash>")
        '''
        self.resultFolder = resultFolder
def getAllCommitsWithJacocoErrors(self):
commitsWithJacocoErrors = []
for commit in self.getImmediateSubdirectories(self.resultFolder):
errorFile = self.resultFolder+"/"+commit+"/jacoco_errors.txt"
if(os.path.exists(errorFile) and os.path.isfile(errorFile)):
commitsWithJacocoErrors.append(commit)
return commitsWithJacocoErrors
def getAllCommitsWithPitErrors(self):
commitsWithPitErrors = []
for commit in self.getImmediateSubdirectories(self.resultFolder):
errorFile = self.resultFolder+"/"+commit+"/pit_errors.txt"
if(os.path.exists(errorFile) and os.path.isfile(errorFile)):
commitsWithPitErrors.append(commit)
return commitsWithPitErrors
    def createCSVFile(self, outputFile):
        """Write one CSV row per non-empty commit folder.

        Folder names are expected as "<number>-<hash>"; rows are ordered
        by the numeric prefix and flag whether Jacoco/Pit error markers
        were found for that commit.

        NOTE(review): the output file handle is never explicitly closed.
        """
        jacocoErrors = self.getAllCommitsWithJacocoErrors()
        pitErrors = se | lf.getAllCommitsWithPitErrors()
        result = {}
        for commit in self.getImmediateSubdirectories(self.resultFolder):
            # Empty commit folders are skipped entirely.
            if(not os.listdir(self.resultFolder+"/"+commit) == []):
                parts = | commit.split("-")
                result[int(parts[0])] = {'hash': parts[1],'jacocoError' : (commit in jacocoErrors), 'pitError' : (commit in pitErrors)}
        # Sort rows by the numeric commit index before writing.
        sortedResults = collections.OrderedDict(sorted(result.items()))
        writer = csv.writer(open(outputFile, 'w'))
        writer.writerow(['Number', 'Hash', 'HasJacocoError', 'HasPitError'])
        for key, value in sortedResults.items():
            writer.writerow([key, value['hash'], value['jacocoError'], value['pitError']])
def getImmediateSubdirectories(self, a_dir):
""" Helper method, which gets the **immediate** subdirectoriesof a path. Is helpful, if one want to create a
parser, which looks if certain folders are there.
:param a_dir: directory from which **immediate** subdirectories should be listed """
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
if __name__ == "__main__":
    # Ad-hoc manual experiments left over from development.
    #resultParser = ResultParser("/home/fabian/Arbeit/testEvolution/results/checkstyle")
    #print(resultParser.getAllCommitsWithJacocoErrors())
    #resultParser.createCSVFile("/home/fabian/test.csv")
    # NOTE(review): hard-coded local path; the explicit close() below is
    # redundant inside a with-block.
    with open("/home/fabian/Arbeit/testEvolution/tmp/checkstyle_working/src/checkstyle/com/puppycrawl/tools/checkstyle/gui/ParseTreeInfoPanel.java", "rb") as sourceFile:
        data = sourceFile.readlines()
        sourceFile.close()
|
yrunts/python-for-qa | 3-python-intermediate/examples/file.py | Python | cc0-1.0 | 148 | 0 |
# Manual file handling: the caller must remember to close the handle.
f = open('test_content.txt', | 'r')
print(f.read())
f.close()
# using context manager |
# Context-managed variant: the file is closed automatically on block exit.
with open('test_content.txt', 'r') as handle:
    print(handle.read())
|
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/requests/packages/urllib3/util/connection.py | Python | apache-2.0 | 3,341 | 0 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True

    if poll:
        # Preferred path on platforms that support poll(): a readable
        # socket here means either buffered data (bad) or a dropped peer.
        watcher = poll()
        watcher.register(sock, POLLIN)
        fileno = sock.fileno()
        for fd, _event in watcher.poll(0.0):
            if fd == fileno:
                return True
    else:
        if not select:  # Platform-specific: AppEngine
            return False
        try:
            # Non-blocking readability probe via select().
            return select([sock], [], [], 0.0)[0]
        except socket.error:
            return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    # Bare IPv6 literals arrive bracketed ("[::1]"); getaddrinfo wants them bare.
    if host.startswith('['):
        host = host.strip('[]')
    err = None
    # Try each resolved address family/socktype until one connects.
    for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)

            # If provided, set socket level options before connecting.
            # This is the only addition urllib3 makes to this function.
            _set_socket_options(sock, socket_options)

            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        e | xcept socket.error as e:
            # Remember the last failure and fall through to the next address.
            err = e
            if sock is not None:
                sock.close()
                sock = None

    # Re-raise the last connection error, or complain about empty resolution.
    if err is not None:
        raise err
    raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
    # Apply an iterable of ``(level, optname, value)`` tuples via
    # ``setsockopt``; ``None`` means there is nothing to set.
    if options is None:
        return

    for opt | in options:
        sock.setsockopt(*opt)
|
lxki/pjsip | tests/pjsua/scripts-sendto/331_srtp_prefer_rtp_avp.py | Python | gpl-2.0 | 827 | 0.020556 | # $Id: 331_srtp_prefer_rtp_avp.py 2081 2008-06-27 21:59:15Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# W | hen SRTP is NOT enabled in pjsua, it should prefer to use
# RTP/AVP media line if there are multiple m=audio lines
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=-
c=IN IP4 127.0.0.1
t=0 0
m=audio 5000 RTP/SAVP 0
a=crypto:1 aes_cm_128_hmac_sha1_80 inline:WnD7c1ksDGs+dIefCEo8omPg4uO8DYIinNGL5yxQ
m=audio 4000 RTP/AVP 0
"""
pjsua_args = "--null-audio --auto-answ | er 200 --use-srtp 0"
extra_headers = ""
include = ["Content-Type: application/sdp", # response must include SDP
"m=audio 0 RTP/SAVP[\\s\\S]+m=audio [1-9]+[0-9]* RTP/AVP"
]
exclude = ["a=crypto"]
sendto_cfg = sip.SendtoCfg("Prefer RTP/SAVP", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
peterayeni/rapidsms | rapidsms/contrib/handlers/handlers/keyword.py | Python | bsd-3-clause | 4,432 | 0 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from django.core.exceptions import ObjectDoesNotExist
from ..exceptions import HandlerError
from .base import BaseHandler
class KeywordHandler(BaseHandler):
"""
This handler type can be subclassed to create simple keyword-based
handlers. When a message is received, it is checked against the mandatory
``keyword`` attribute (a regular expression) for a prefix match. For
example::
>>> class AbcHandler(KeywordHandler):
... keyword = "abc"
...
... def help(self):
... self.respond("Here is some help.")
...
... def handle(self, text):
... self.respond("You said: %s." % text)
If the keyword is matched and followed by some text, the ``handle`` method
is called::
>>> AbcHandler.test("abc waffles")
['You said: waffles.']
If *just* the keyword is matched, the ``help`` method is called::
>>> AbcHandler.test("abc")
['Here is some help.']
All other messages are silently ignored (as usual), to allow other apps or
handlers to catch them.
"""
#: A string specifying a regular expression matched against the
#: beginning of the message. Not case sensitive.
keyword = None
def help(self):
"""Called when the keyword matches but no text follows"""
raise NotImplementedError
def handle(self, text):
"""Called when the keyword matches and text follows
:param text: The text that follows the keyword. Any whitespace
between the keyword and the text is not included.
"""
raise NotImplementedError
@classmethod
def _keyword(cls):
if hasattr(cls, "keyword") and cls.keyword:
# The 'keyword' is inside non-grouping parentheses so that a
# user could set the keyword to a regex - e.g.
# keyword = r'one|two|three'
prefix = r"""
^\s* # discard leading whitespace
(?:{keyword}) # require the keyword or regex
[\s,;:]* # consume any whitespace , ; or :
([^\s,;:].*)? # capture rest of line if any, starting
# with the first non-whitespace
$ # match all the way to the end
""".format(keyword=cls.keyword)
return re.compile(prefix, re.IGNORECASE | re.VERBOSE | re.DOTALL)
raise HandlerError('KeywordHandler must define a keyword.')
@classmethod
def dispatch(cls, router, msg):
keyword = cls._keyword()
match = keyword.match(msg.text)
if match is None:
return False
# spawn an instance of this handler, and stash
# the low(er)-level router and message object
inst = cls(router, msg)
# if any non-whitespace content was send after the keyword, send
| # it along to the handle method. the instance can always find
# the original text via self.msg if it really needs it.
text = match.group(1)
if text is not None and text.strip() != "":
try:
inst.handle(text)
# special case: if an object was expected but not found,
# return the (rather appropriate) "%s matching query does
# not exist." message. this can, of course, be overridden by
| # catching the exception within the ``handle`` method.
except ObjectDoesNotExist as err:
return inst.respond_error(
str(err))
# another special case: if something was miscast to an int
# (it was probably a string from the ``text``), return a
# more friendly (and internationalizable) error.
except ValueError as err:
p = r"^invalid literal for int\(\) with base (\d+?): '(.+?)'$"
m = re.match(p, str(err))
# allow other valueerrors to propagate.
if m is None:
raise
return inst.respond_error(
"Not a valid number: %(string)s" % dict(
string=m.group(2)))
# if we received _just_ the keyword, with
# no content, some help should be sent back
else:
inst.help()
return True
|
pytrainer/pytrainer | pytrainer/extension.py | Python | gpl-2.0 | 5,489 | 0.032246 | # -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import logging
from .lib.xmlUtils import XMLParser
from .gui.windowextensions import WindowExtensions
class Extension:
def __init__(self, data_path = None, parent = None):
self.data_path=data_path
self.parent = parent
self.pytrainer_main = parent
def getActiveExtensions(self):
retorno = []
for extension in self.getExtensionList():
if self.getExtensionInfo(extension[0])[2] == "1":
retorno.append(extension[0])
return retorno
def manageExtensions(self):
ExtensionList = self.getExtensionList()
windowextension = WindowExtensions(self.data_path, self)
windowextension.setList(ExtensionList)
windowextension.run()
def getExtensionList(self):
extensiondir = self.data_path+"/extensions"
extensionList = []
for extension in os.listdir(extensiondir):
extensionxmlfile = extensiondir+"/"+extension+"/conf.xml"
if os.path.isfile(extensionxmlfile):
extensioninfo = XMLParser(extensionxmlfile)
name = extensioninfo.getValue("pytrainer-extension","name")
description = extensioninfo.getValue("pytrainer-extension","description")
extensionList.append((extensiondir+"/"+extension,name,description))
return extensionList
def getExtensionInfo(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
name = info.getValue("pytrainer-extension","name")
description = info.getValue("pytrainer-extension","description")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
helpfile = pathExtension+"/"+info.getValu | e("pytrainer-extension","helpfile")
type = info.getValue("pytrainer-extension","type")
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
status = 0
else:
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
status = info.getValue("pytrainer-extension","status")
#print name,description,status,helpfile,type
return name,description,status,helpfile,type
def getExtensionConfParams(self,pathExtension):
| info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
params = {}
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
else:
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
#params.append((pref[0],info.getValue("pytrainer-extension",pref[0])))
return params
def setExtensionConfParams(self,pathExtension,savedOptions):
info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir+"/"+code
if not os.path.isdir(extensiondir):
os.mkdir(extensiondir)
if not os.path.isfile(extensiondir+"/conf.xml"):
savedOptions.append(("status","0"))
info = XMLParser(extensiondir+"/conf.xml")
info.createXMLFile("pytrainer-extension",savedOptions)
def loadExtension(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
txtbutton = info.getValue("pytrainer-extension","extensionbutton")
name = info.getValue("pytrainer-extension","name")
type = info.getValue("pytrainer-extension","type")
#print "Loading Extension %s" %name
return txtbutton,pathExtension,type
def getCodeConfValue(self,code,value):
extensiondir = self.pytrainer_main.profile.extensiondir
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
return info.getValue("pytrainer-extension",value)
def importClass(self, pathExtension):
logging.debug('>>')
info = XMLParser(pathExtension+"/conf.xml")
#import extension
extension_dir = os.path.realpath(pathExtension)
extension_filename = info.getValue("pytrainer-extension","executable")
extension_classname = info.getValue("pytrainer-extension","extensioncode")
extension_type = info.getValue("pytrainer-extension","type")
options = self.getExtensionConfParams(pathExtension)
logging.debug("Extension Filename: %s", extension_filename )
logging.debug("Extension Classname: %s", extension_classname)
logging.debug("Extension Type: %s", extension_type)
logging.debug("Extension options: %s", options)
sys.path.insert(0, extension_dir)
module = __import__(extension_filename)
extensionMain = getattr(module, extension_classname)
logging.debug('<<')
return extensionMain(parent=self, pytrainer_main=self.parent, conf_dir=self.pytrainer_main.profile.confdir, options=options)
|
ddalex/python-prompt-toolkit | examples/multi-column-autocompletion.py | Python | bsd-3-clause | 955 | 0.002094 | #!/usr/bin/env python
"""
Similar to the autocompletion example. But display all the completions in multiple columns.
"""
from __future__ import unicode_literals
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.shortcuts import get_input
animal_completer = WordCompleter([
'alligator',
'ant',
'ape',
'bat',
'bear',
'beaver',
'bee',
'bison',
| 'butterfly',
'cat',
'chicken',
'crocodile',
'dinosaur',
'dog',
'dolphine',
'dove',
'duck',
'eagle',
'elephant',
'fish',
'goat',
'gorilla',
'kangoroo',
'leopard',
'lion',
'mouse' | ,
'rabbit',
'rat',
'snake',
'spider',
'turkey',
'turtle',
], ignore_case=True)
def main():
text = get_input('Give some animals: ', completer=animal_completer, display_completions_in_columns=True)
print('You said: %s' % text)
if __name__ == '__main__':
main()
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QPicture.py | Python | gpl-2.0 | 3,567 | 0.00841 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QPaintDevice import QPaintDevice
class QPicture(QPaintDevice):
"""
QPicture(int formatVersion=-1)
QPicture(QPicture)
"""
def boundingRect(self): # real signature unknown; restored from __doc__
""" QPicture.boundingRect() -> QRect """
pass
def data(self): # real signature unknown; restored from __doc__
""" QPicture.data() -> str """
return ""
def detach(self): # real signature unknown; restored from __doc__
""" QPicture.detach() """
pass
def devType(self): # real signature unknown; restored from __doc__
""" QPicture.devType() -> int """
return 0
def inputFormatList(self): # real signature unknown; restored from __doc__
""" QPicture.inputFormatList() -> list-of-str """
pass
def inputFormats(self): # real signature unknown; restored from __doc__
""" QPicture.inputFormats() -> list-of-QByteArray """
pass
def isDetached(self): # real signature unknown; restored from __doc__
""" QPic | ture.isDetached() -> bool """
return False
def isNull(self): # real sign | ature unknown; restored from __doc__
""" QPicture.isNull() -> bool """
return False
def load(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPicture.load(QIODevice, str format=None) -> bool
QPicture.load(str, str format=None) -> bool
"""
return False
def metric(self, QPaintDevice_PaintDeviceMetric): # real signature unknown; restored from __doc__
""" QPicture.metric(QPaintDevice.PaintDeviceMetric) -> int """
return 0
def outputFormatList(self): # real signature unknown; restored from __doc__
""" QPicture.outputFormatList() -> list-of-str """
pass
def outputFormats(self): # real signature unknown; restored from __doc__
""" QPicture.outputFormats() -> list-of-QByteArray """
pass
def paintEngine(self): # real signature unknown; restored from __doc__
""" QPicture.paintEngine() -> QPaintEngine """
return QPaintEngine
def pictureFormat(self, p_str): # real signature unknown; restored from __doc__
""" QPicture.pictureFormat(str) -> str """
return ""
def play(self, QPainter): # real signature unknown; restored from __doc__
""" QPicture.play(QPainter) -> bool """
return False
def save(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPicture.save(QIODevice, str format=None) -> bool
QPicture.save(str, str format=None) -> bool
"""
return False
def setBoundingRect(self, QRect): # real signature unknown; restored from __doc__
""" QPicture.setBoundingRect(QRect) """
pass
def setData(self, p_str): # real signature unknown; restored from __doc__
""" QPicture.setData(str) """
pass
def size(self): # real signature unknown; restored from __doc__
""" QPicture.size() -> int """
return 0
def swap(self, QPicture): # real signature unknown; restored from __doc__
""" QPicture.swap(QPicture) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
|
takeshineshiro/python-novaclient | novaclient/tests/functional/test_keypairs.py | Python | apache-2.0 | 4,555 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import uuid
from tempest_lib import exceptions
from novaclient.tests.functional import base
from novaclient.tests.functional import fake_crypto
class TestKeypairsNovaClient(base.ClientTestBase):
"""Keypairs functional tests.
"""
def _serialize_kwargs(self, kwargs):
kwargs_pairs = ['--%(key)s %(val)s' % {'key': key.replace('_', '-'),
'val': val}
for key, val in kwargs.items()]
return " ".join(kwargs_pairs)
def _create_keypair(self, **kwargs):
key_name = self._raw_create_keypair(**kwargs)
self.addCleanup(self.nova, 'keypair-delete %s' % key_name)
return key_name
def _raw_create_keypair(self, **kwargs):
key_name = 'keypair-' + str(uuid.uuid4())
kwargs_str = self._serialize_kwargs(kwargs)
self.nova('keypair-add %s %s' % (kwargs_str, key_name))
return key_name
def _show_keypair(self, key_name):
return self.nova('keypair-show %s' % key_name)
def _list_keypairs(self):
return self.nova('keypair-list')
def _delete_keypair(self, key_name):
self.nova('keypair-delete %s' % key_name)
def _create_public_key_file(self, public_key):
pubfile = tempfile.mkstemp()[1]
with open(pubfile, 'w') as f:
f.write(public_key)
return pubf | ile
def test_create_keypair(self):
key_name = self._create_keypair()
keypair = self._show_keypair(key_name)
self.assertIn(key_name, keypair)
return keypair
def _test_import_keypair(self, fingerprint, **create_kwargs):
key_name = self._create_keypair(**create_kwargs)
keypair = self._show_keypair(key_name)
| self.assertIn(key_name, keypair)
self.assertIn(fingerprint, keypair)
return keypair
def test_import_keypair(self):
pub_key, fingerprint = fake_crypto.get_ssh_pub_key_and_fingerprint()
pub_key_file = self._create_public_key_file(pub_key)
self._test_import_keypair(fingerprint, pub_key=pub_key_file)
def test_list_keypair(self):
key_name = self._create_keypair()
keypairs = self._list_keypairs()
self.assertIn(key_name, keypairs)
def test_delete_keypair(self):
key_name = self._raw_create_keypair()
keypair = self._show_keypair(key_name)
self.assertIsNotNone(keypair)
self._delete_keypair(key_name)
# keypair-show should fail if no keypair with given name is found.
self.assertRaises(exceptions.CommandFailed,
self._show_keypair, key_name)
class TestKeypairsNovaClientV22(TestKeypairsNovaClient):
"""Keypairs functional tests for v2.2 nova-api microversion.
"""
def nova(self, *args, **kwargs):
return self.cli_clients.nova(flags='--os-compute-api-version 2.2 ',
*args, **kwargs)
def test_create_keypair(self):
keypair = super(TestKeypairsNovaClientV22, self).test_create_keypair()
self.assertIn('ssh', keypair)
def test_create_keypair_x509(self):
key_name = self._create_keypair(key_type='x509')
keypair = self._show_keypair(key_name)
self.assertIn(key_name, keypair)
self.assertIn('x509', keypair)
def test_import_keypair(self):
pub_key, fingerprint = fake_crypto.get_ssh_pub_key_and_fingerprint()
pub_key_file = self._create_public_key_file(pub_key)
keypair = self._test_import_keypair(fingerprint, pub_key=pub_key_file)
self.assertIn('ssh', keypair)
def test_import_keypair_x509(self):
certif, fingerprint = fake_crypto.get_x509_cert_and_fingerprint()
pub_key_file = self._create_public_key_file(certif)
keypair = self._test_import_keypair(fingerprint, key_type='x509',
pub_key=pub_key_file)
self.assertIn('x509', keypair)
|
RobinQuetin/CAIRIS-web | cairis/cairis/WeaknessTreatmentDialog.py | Python | apache-2.0 | 3,242 | 0.024985 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import ARM
from WeaknessTreatmentPanel import WeaknessTreatmentPanel
class WeaknessTreatmentDialog(wx.Dialog):
def __init__(self,parent,targetName,cvName,reqName = '',assetName = '',effValue = '',tRat = ''):
wx.Dialog.__init__(self,parent,-1,'Edit ' + targetName + ' treatment',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,300))
self.theRequirementName = reqName
self.theAssetName = assetName
self.theEffectivenessValue = effValue
self.theRationale = tRat
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = WeaknessTreatmentPanel(self,cvName)
mainSizer.Add(self.panel,1,wx.EXPAND)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,armid.WEAKNESSTREATMENT_BUTTONCOMMIT_ID,self.onCommit)
if reqName != '':
self.panel.loadControls(reqName,assetName,effValue)
def onCommit(self,evt):
reqCtrl = self.F | indWindowById(armid.WEAKNESSTREATMENT_COMBOREQGOAL_ID)
assetC | trl = self.FindWindowById(armid.WEAKNESSTREATMENT_COMBOASSET_ID)
effCtrl = self.FindWindowById(armid.WEAKNESSTREATMENT_COMBOEFFECTIVENESS_ID)
ratCtrl = self.FindWindowById(armid.WEAKNESSTREATMENT_TEXTRATIONALE_ID)
self.theRequirementName = reqCtrl.GetValue()
self.theAssetName = assetCtrl.GetValue()
self.theEffectivenessValue = effCtrl.GetValue()
self.theRationale = ratCtrl.GetValue()
commitLabel = 'Edit weakness treatment'
if len(self.theRequirementName) == 0:
dlg = wx.MessageDialog(self,'Requirement name cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.theAssetName) == 0:
dlg = wx.MessageDialog(self,'Asset name cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.theEffectivenessValue) == 0:
dlg = wx.MessageDialog(self,'Effectiveness cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.theRationale) == 0:
dlg = wx.MessageDialog(self,'Rationale cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(armid.WEAKNESSTREATMENT_BUTTONCOMMIT_ID)
def requirement(self): return self.theRequirementName
def asset(self): return self.theAssetName
def effectiveness(self): return self.theEffectivenessValue
def rationale(self): return self.theRationale
|
siosio/intellij-community | python/testData/quickFixes/PyAddImportQuickFixTest/combinedElementOrdering/first/second/__init__.py | Python | apache-2.0 | 20 | 0.05 | f | rom bar import | path |
andymckay/django | django/core/management/base.py | Python | bsd-3-clause | 13,731 | 0.001311 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import traceback
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a comma | nd.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
| os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
show_traceback = options.get('traceback', False)
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
saved_lang = None
if self.can_import_settings:
try:
from django.utils import translation
saved_lang = translation.get_language()
translation.activate('en-us')
except ImportError, e:
# If settings |
raphaelrpl/portal | backend/appengine/routes/profiles/new.py | Python | mit | 968 | 0.002066 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from gaepermission.decorator import login_required
from tekton import router
from gaecookie.decorator import no_csrf
from profile_app import profile_facade
from routes import profiles
from tekton.gae.middleware.redirect import RedirectResponse
@login_required
@no_csrf
def index():
return TemplateResponse({'save_path': rou | ter.to_path(save)}, 'profiles/profile_form.html')
@login_required
def save(**profile_properties):
cmd = profile_facade.save_profile_cmd(**profile_properties)
try:
cmd()
except CommandExecutionException:
context = {'errors': cmd.errors,
' | profile': profile_properties}
return TemplateResponse(context, 'profiles/profile_form.html')
return RedirectResponse(router.to_path(profiles))
|
aoakeson/home-assistant | homeassistant/components/sensor/systemmonitor.py | Python | mit | 5,755 | 0.000174 | """
Support for monitoring the local system..
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.systemmonitor/
"""
import logging
import homeassistant.util.dt as dt_util
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['psutil==4.0.0']
SENSOR_TYPES = {
'disk_use_percent': ['Disk Use', '%', 'mdi:harddisk'],
'disk_use': ['Disk Use', 'GiB', 'mdi:harddisk'],
'disk_free': ['Disk Free', 'GiB', 'mdi:harddisk'],
'memory_use_percent': ['RAM Use', '%', 'mdi:memory'],
'memory_use': ['RAM Use', 'MiB', 'mdi:memory'],
'memory_free': ['RAM Free', 'MiB', 'mdi:memory'],
'processor_use': ['CPU Use', '%', 'mdi:memory'],
'process': ['Process', '', 'mdi:memory'],
'swap_use_percent': ['Swap Use', '%', 'mdi:harddisk'],
'swap_use': ['Swap Use', 'GiB', 'mdi:harddisk'],
'swap_free': ['Swap Free', 'GiB', 'mdi:harddisk'],
'network_out': ['Sent', 'MiB', 'mdi:server-network'],
'network_in': ['Recieved', 'MiB', 'mdi:server-network'],
'packets_out': ['Packets sent', '', 'mdi:server-network'],
'packets_in': ['Packets recieved', '', 'mdi:server-network'],
'ipv4_address': ['IPv4 address', '', 'mdi:server-network'],
'ipv6_address': ['IPv6 address', '', 'mdi:server-network'],
'last_boot': ['Last Boot', '', 'mdi:clock'],
'since_last_boot': ['Since Last Boot', '', 'mdi:clock']
}
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensors."""
dev = []
for resource in config['resources']:
if 'arg' not in resource:
resource['arg'] = ''
if resource['type'] not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', resource['type'])
else:
dev.append(SystemMonitorSensor(resource['type'], resource['arg']))
add_devices(dev)
class SystemMonitorSensor(Entity):
"""Implementation of a system monitor sensor."""
def __init__(self, sensor_type, argument=''):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0] + ' ' + argument
self.argument = argument
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name.rstrip()
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
# pylint: disable=too-many-branches
def update(self):
"""Get the latest system information."""
import psutil
if self.type == 'disk_use_percent':
self._state = psutil.disk_usage(self.argument).percent
elif self.type == 'disk_use':
self._state = round(psutil.disk_usage(self.argument).used /
1024**3, 1)
elif self.type == 'disk_free':
self._state = round(psutil.disk_usage(self.argument).free /
1024**3, 1)
elif self.type == 'memory_use_percent':
self._state = psutil.virtual_memory().percent
elif self.type == 'memory_use':
self._state = round((psutil.virtual_memory().total -
| psutil.virtual_memory().available) /
1024**2, 1)
elif self.type == 'memory_free':
self._state = round(psutil.virtual_memory().available / 1024**2, 1)
elif self.type == 'swap_use_percent':
self._state = psutil.swap_memory().percent
elif s | elf.type == 'swap_use':
self._state = round(psutil.swap_memory().used / 1024**3, 1)
elif self.type == 'swap_free':
self._state = round(psutil.swap_memory().free / 1024**3, 1)
elif self.type == 'processor_use':
self._state = round(psutil.cpu_percent(interval=None))
elif self.type == 'process':
if any(self.argument in l.name() for l in psutil.process_iter()):
self._state = STATE_ON
else:
self._state = STATE_OFF
elif self.type == 'network_out':
self._state = round(psutil.net_io_counters(pernic=True)
[self.argument][0] / 1024**2, 1)
elif self.type == 'network_in':
self._state = round(psutil.net_io_counters(pernic=True)
[self.argument][1] / 1024**2, 1)
elif self.type == 'packets_out':
self._state = psutil.net_io_counters(pernic=True)[self.argument][2]
elif self.type == 'packets_in':
self._state = psutil.net_io_counters(pernic=True)[self.argument][3]
elif self.type == 'ipv4_address':
self._state = psutil.net_if_addrs()[self.argument][0][1]
elif self.type == 'ipv6_address':
self._state = psutil.net_if_addrs()[self.argument][1][1]
elif self.type == 'last_boot':
self._state = dt_util.datetime_to_date_str(
dt_util.as_local(
dt_util.utc_from_timestamp(psutil.boot_time())))
elif self.type == 'since_last_boot':
self._state = dt_util.utcnow() - dt_util.utc_from_timestamp(
psutil.boot_time())
|
lowitty/selenium | com/ericsson/xn/commons/base_clint_for_selenium.py | Python | mit | 2,301 | 0.005215 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# from datetime import datetime
from multiprocessing.managers import BaseManager
BaseManager.register('platform_info')
def platform_info(ip, port, passwd):
mgr = start_session(ip, port, passwd)
return mgr.platform_info()._getvalue()
BaseManager.register('server_time')
def server_time(ip, port, passwd):
mgr = start_session(ip, port, passwd)
return mgr.server_time()._getvalue()
BaseManager.register('send_trap')
def send_trap(ip, port, passwd, ne_type, alarm, target_ip,auth_info=None, trap_port=None):
mgr = start_session(ip, port, passwd)
return mgr.send_trap(ne_type, alarm, target_ip,auth_info, trap_port)._getvalue()
BaseManager.register('send_trap_nbi')
def send_trap_nbi(ip, port, passwd, ne_type, alarm, host,
| auth_info=None, nbi_raw='/opt/LINBI/TestTool_CMCC_N13A/bin/raw_catch.log', t_port=None):
mgr = start_session(ip, port, passwd)
return mgr.send_trap_nbi(ne_type, alarm, host, auth_info, nbi_raw, t_port)._getvalue()
BaseManager.register('')
def get_nodeid_by_nename(ip, port, password, ne_name):
mgr = start_session(ip, port, password)
return mgr.get_nodeid_by_nename(ne_name)._getvalue()
BaseManager.register('is_alarm_id_unic')
def is_alarm_id_unic(ip, port, password, id):
mgr = | start_session(ip, port, password)
return mgr.is_alarm_id_unic(id)._getvalue()
BaseManager.register('is_notification_id_unic')
def is_notification_id_unic(ip, port, password, id):
mgr = start_session(ip, port, password)
return mgr.is_notification_id_unic(id)._getvalue()
def start_session(ip, port, passwd):
mgr = BaseManager(address=(ip, port), authkey=passwd)
mgr.connect()
return mgr
def close_session(mgr):
pass
# print datetime.now().strftime('%H:%M:%S:%f')
#print send_trap('10.184.74.67', 7070,'xoambaseserver', 'OCGAS', 'monitorTargetsExceedThreshold', '10.184.74.68', [])
# print datetime.now().strftime('%H:%M:%S:%f')
'''
from datetime import datetime
print datetime.now().strftime('%H:%M:%S:%f')
print send_trap_nbi('127.0.0.1', 7070, 'xoambaseserver', 'LTEHSS', 'SoftwareProgramError-1', '127.0.0.1', ['privUser1', 'authUser1', 'privUser1'], '/Users/lowitty/temp/x.txt', 11162)
print datetime.now().strftime('%H:%M:%S:%f')
''' |
emvecchi/mss | src/utils/crowdflower/create_csv.py | Python | apache-2.0 | 1,241 | 0.033038 | import sys, glob, os
"""
This program creates a csv file where each row contains an
image and one of its associated tag <"image_path, tag">.
An image has to be in 'jpg' format, a lab | el in 'txt' format.
Usage:
@param1: dataset directory
@param2: url base where images and labels have to be located
@param3: output directory
"""
def write_csv(dataset_path, url_path, out_path):
info = {}
for file_path in glob.glob(os.path.join(dataset_path, 'labels/*')):
file = open(file_path, 'r')
file_name = os.path.basename(file_path)
image_name = file_name[:-4] + ".jpg"
if image_name in info | :
label = info[image_name]
else:
label = []
for tag in file:
label.append(tag)
info[image_name] = label
data = []
for image in info:
for tag in info[image]:
image_file_name = url_path + 'images/' + image
data.append((str(image_file_name), str(tag)))
csv_file = open(out_path, 'w')
csv_file.write("Image,Tag" + '\n')
for pair in data:
csv_file.write(pair[0] + ',' + pair[1] + '\n')
csv_file.close()
if __name__ =="__main__":
_dataset_path = sys.argv[1]
_url_path = sys.argv[2]
_out_path = sys.argv[3]
write_csv(_dataset_path, _url_path, _out_path)
|
polysquare/polysquare-travis-container | container-setup.py | Python | mit | 1,494 | 0 | # /container-setup.py
#
# Initial setup script specific to polysquare-travis-container. Creates
# a cache dir in the container and sets the
# _POLYSQUARE_TRAVIS_CONTAINER_TEST_CACHE_DIR environment variable
# to point to it.
#
# See /LICENCE.md for Copyright information
"""Initial setup script specific to polysquare-ci-scripts."""
import os
def run(cont, util, shell, argv=list()):
"""Set up language runtimes and pass control to python project script."""
cache_dir = cont.named_cache_dir("travis_container_downloads",
ephemeral=False)
| cache_dir_key = "_POLYSQUARE_TRAVIS_CONTAINER_TEST_CACHE_DIR"
shell.overwrite_environment_variable(cache_dir_key, cache_dir)
cont.fetch_and_import("setup/pyth | on/setup.py").run(cont, util, shell, argv)
config_python = "setup/project/configure_python.py"
py_ver = util.language_version("python3")
py_cont = cont.fetch_and_import(config_python).get(cont,
util,
shell,
py_ver)
with py_cont.activated(util):
with util.Task("""Downloading all distributions"""):
os.environ[cache_dir_key] = cache_dir
util.execute(cont,
util.long_running_suppressed_output(),
util.which("python"),
"download-all-distros-to.py")
|
marcoceppi/juju-bundlelib | jujubundlelib/changeset.py | Python | lgpl-3.0 | 9,892 | 0.000101 | # Copyright 2015 Canonical Ltd.
# Licensed under the AGPLv3, see LICENCE file for details.
from __future__ import unicode_literals
import copy
import itertools
import models
import utils
class ChangeSet(object):
"""Hold the state for parser handlers.
Also expose methods to send and receive changes (usually Python dicts).
"""
services_added = {}
machines_added = {}
| def __init__(self, bundle):
self.bundle = bundle
self._changeset = []
self._counter = itertools.count()
def send(self, change):
"""Store a change | in this change set."""
self._changeset.append(change)
def recv(self):
"""Return all the collected changes.
Changes are stored using self.send().
"""
changeset = self._changeset
self._changeset = []
return changeset
def next_action(self):
"""Return an incremental integer to be included in the changes ids."""
return next(self._counter)
def is_legacy_bundle(self):
"""Report whether the bundle uses the legacy (version 3) syntax."""
return utils.is_legacy_bundle(self.bundle)
def handle_services(changeset):
"""Populate the change set with addCharm and addService changes."""
charms = {}
for service_name, service in changeset.bundle['services'].items():
# Add the addCharm record if one hasn't been added yet.
if service['charm'] not in charms:
record_id = 'addCharm-{}'.format(changeset.next_action())
changeset.send({
'id': record_id,
'method': 'addCharm',
'args': [service['charm']],
'requires': [],
})
charms[service['charm']] = record_id
# Add the deploy record for this service.
record_id = 'addService-{}'.format(changeset.next_action())
changeset.send({
'id': record_id,
'method': 'deploy',
'args': [
service['charm'],
service_name,
service.get('options', {})
],
'requires': [charms[service['charm']]],
})
changeset.services_added[service_name] = record_id
if 'annotations' in service:
changeset.send({
'id': 'setAnnotations-{}'.format(changeset.next_action()),
'method': 'setAnnotations',
'args': [
'${}'.format(record_id),
'service',
service['annotations'],
],
'requires': [record_id],
})
return handle_machines
def handle_machines(changeset):
"""Populate the change set with addMachines changes."""
for machine_name, machine in changeset.bundle.get('machines', {}).items():
if machine is None:
# We allow the machine value to be unset in the YAML.
machine = {}
record_id = 'addMachines-{}'.format(changeset.next_action())
changeset.send({
'id': record_id,
'method': 'addMachines',
'args': [
{
'series': machine.get('series', ''),
'constraints': machine.get('constraints', {}),
},
],
'requires': [],
})
changeset.machines_added[str(machine_name)] = record_id
if 'annotations' in machine:
changeset.send({
'id': 'setAnnotations-{}'.format(changeset.next_action()),
'method': 'setAnnotations',
'args': [
'${}'.format(record_id),
'machine',
machine['annotations'],
],
'requires': [record_id],
})
return handle_relations
def handle_relations(changeset):
"""Populate the change set with addRelation changes."""
for relation in changeset.bundle.get('relations', []):
relations = [models.Relation(*i.split(':')) if ':' in i
else models.Relation(i, '') for i in relation]
changeset.send({
'id': 'addRelation-{}'.format(changeset.next_action()),
'method': 'addRelation',
'args': [
'${}'.format(
changeset.services_added[rel.name]) +
(':{}'.format(rel.interface) if rel.interface else '')
for rel in relations
],
'requires': [changeset.services_added[rel.name] for
rel in relations],
})
return handle_units
def handle_units(changeset):
"""Populate the change set with addUnit changes."""
units, records = {}, {}
for service_name, service in changeset.bundle['services'].items():
for i in range(service.get('num_units', 0)):
record_id = 'addUnit-{}'.format(changeset.next_action())
unit_name = '{}/{}'.format(service_name, i)
records[record_id] = {
'id': record_id,
'method': 'addUnit',
'args': [
'${}'.format(changeset.services_added[service_name]),
1,
None,
],
'requires': [changeset.services_added[service_name]],
}
units[unit_name] = {
'record': record_id,
'service': service_name,
'unit': i,
}
_handle_units_placement(changeset, units, records)
def _handle_units_placement(changeset, units, records):
"""Ensure that requires and placement directives are taken into account."""
for service_name, service in changeset.bundle['services'].items():
num_units = service.get('num_units')
if num_units is None:
# This is a subordinate service.
continue
placement_directives = service.get('to', [])
if not isinstance(placement_directives, (list, tuple)):
placement_directives = [placement_directives]
if placement_directives and not changeset.is_legacy_bundle():
placement_directives += (
placement_directives[-1:] *
(num_units - len(placement_directives)))
placed_in_services = {}
for i in range(num_units):
unit = units['{}/{}'.format(service_name, i)]
record = records[unit['record']]
if i < len(placement_directives):
record = _handle_unit_placement(
changeset, units, unit, record, placement_directives[i],
placed_in_services)
changeset.send(record)
def _handle_unit_placement(
changeset, units, unit, record, placement_directive,
placed_in_services):
record = copy.deepcopy(record)
if changeset.is_legacy_bundle():
placement = models.parse_v3_unit_placement(placement_directive)
else:
placement = models.parse_v4_unit_placement(placement_directive)
if placement.machine:
# The unit is placed on a machine.
if placement.machine == 'new':
parent_record_id = 'addMachines-{}'.format(changeset.next_action())
options = {}
if placement.container_type:
options = {'containerType': placement.container_type}
changeset.send({
'id': parent_record_id,
'method': 'addMachines',
'args': [options],
'requires': [],
})
else:
if changeset.is_legacy_bundle():
record['args'][2] = '0'
return record
parent_record_id = changeset.machines_added[placement.machine]
if placement.container_type:
parent_record_id = _handle_container_placement(
changeset, placement, parent_record_id)
else:
# The unit is placed to a unit or to a service.
service = placement.service
unit_number = placement.unit
if unit_number is None:
unit_number = _next_unit_in_servi |
anqxyr/pyscp | pyscp/snapshot.py | Python | mit | 11,400 | 0 | #!/usr/bin/env python3
"""
Snapshot access classes.
This module contains the classes that facilitate information extraction
and communication with the sqlite Snapshots.
"""
###############################################################################
# Module Imports
###############################################################################
import bs4
import concurrent.futures
import functools
import itertools
import logging
import operator
import pathlib
import re
import requests
from pyscp import core, orm, utils
###############################################################################
# Global Constants And Variables
###############################################################################
log = logging.getLogger(__name__)
###############################################################################
class Page(core.Page):
"""Page object."""
###########################################################################
# Internal Methods
###########################################################################
def _query(self, ptable, stable='User'):
"""Generate SQL queries used to retrieve data."""
pt, st = [getattr(orm, i) for i in (ptable, stable)]
return pt.select(pt, st.name).join(st).where(pt.page == self._id)
@utils.cached_property
def _pdata(self):
"""Preload the ids and contents of the page."""
pdata = orm.Page.get(orm.Page.url == self.url)
return pdata.id, pdata._data['thread'], pdata.html
###########################################################################
# Properties
###########################################################################
@property
def html(self):
"""Return HTML contents of the page."""
return self._pdata[2]
@utils.cached_property
def history(self):
"""Return the revisions of the page."""
revs = self._query('Revision')
revs = sorted(revs, key=lambda x: x.number)
return [core.Revision(
r.id, r.number, r.user.name, str(r.time), r.comment)
for r in revs]
@utils.cached_property
def votes(self):
"""Return all votes made on the page."""
return [core.Vote(v.user.name, v.value)
for v in self._query('Vote')]
@utils.cached_property
def tags(self):
"""Return the set of tags with which the page is tagged."""
return {pt.tag.name for pt in self._query('PageTag', 'Tag')}
class Thread(core.Thread):
"""Discussion/forum thread."""
@utils.cached_property
def posts(self):
"""Post objects belonging to this thread."""
fp = orm.ForumPost
us = orm.User
query = fp.select(fp, us.name).join(us).where(fp.thread == self._id)
return [core.P | ost(
p.id, p.title, p.content, p.user.name,
str(p.time), p._data['parent'])
for p i | n query]
class Wiki(core.Wiki):
"""Snapshot of a Wikidot website."""
Page = Page
Thread = Thread
# Tautology = Tautology
###########################################################################
# Special Methods
###########################################################################
def __init__(self, site, dbpath):
"""Create wiki instance."""
super().__init__(site)
if not pathlib.Path(dbpath).exists():
raise FileNotFoundError(dbpath)
self.dbpath = dbpath
orm.connect(dbpath)
def __repr__(self):
"""Pretty-print current instance."""
return '{}.{}({}, {})'.format(
self.__module__,
self.__class__.__qualname__,
repr(self.site),
repr(self.dbpath))
###########################################################################
# Internal Methods
###########################################################################
@staticmethod
def _filter_author(author):
return (orm.Page.select(orm.Page.url)
.join(orm.Revision).join(orm.User)
.where(orm.Revision.number == 0)
.where(orm.User.name == author))
@staticmethod
def _filter_tag(tag):
return (orm.Page.select(orm.Page.url)
.join(orm.PageTag).join(orm.Tag)
.where(orm.Tag.name == tag))
@staticmethod
def _get_operator(string):
symbol, *values = re.split(r'(\d+)', string)
opdict = {
'>': 'gt', '<': 'lt', '>=': 'ge', '<=': 'le', '=': 'eq', '': 'eq'}
if symbol not in opdict:
raise ValueError
return getattr(operator, opdict[symbol]), values
def _filter_rating(self, rating):
compare, values = self._get_operator(rating)
rating = int(values[0])
return (orm.Page.select(orm.Page.url)
.join(orm.Vote).group_by(orm.Page.url)
.having(compare(orm.peewee.fn.sum(orm.Vote.value), rating)))
def _filter_created(self, created):
compare, values = self._get_operator(created)
date = '-'.join(values[::2])
return (orm.Page.select(orm.Page.url)
.join(orm.Revision).where(orm.Revision.number == 0)
.group_by(orm.Page.url)
.having(compare(
orm.peewee.fn.substr(orm.Revision.time, 1, len(date)),
date)))
def _list_pages_parsed(self, **kwargs):
query = orm.Page.select(orm.Page.url)
keys = ('author', 'tag', 'rating', 'created')
keys = [k for k in keys if k in kwargs]
for k in keys:
query = query & getattr(self, '_filter_' + k)(kwargs[k])
if 'limit' in kwargs:
query = query.limit(kwargs['limit'])
return map(self, [p.url for p in query])
###########################################################################
# SCP-Wiki Specific Methods
###########################################################################
@functools.lru_cache(maxsize=1)
def list_images(self):
"""Image metadata."""
query = (
orm.Image.select(orm.Image, orm.ImageStatus.name)
.join(orm.ImageStatus))
return [core.Image(r.url, r.source, r.status.name, r.notes, r.data)
for r in query]
###############################################################################
class SnapshotCreator:
"""
Create a snapshot of a wikidot site.
This class uses WikidotConnector to iterate over all the pages of a site,
and save the html content, revision history, votes, and the discussion
of each to a sqlite database. Optionally, standalone forum threads can be
saved too.
In case of the scp-wiki, some additional information is saved:
images for which their CC status has been confirmed, and info about
overwriting page authorship.
In general, this class will not save images hosted on the site that is
being saved. Only the html content, discussions, and revision/vote
metadata is saved.
"""
def __init__(self, dbpath):
"""Create an instance."""
if pathlib.Path(dbpath).exists():
raise FileExistsError(dbpath)
orm.connect(dbpath)
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=20)
def take_snapshot(self, wiki, forums=False):
"""Take new snapshot."""
self.wiki = wiki
self._save_all_pages()
if forums:
self._save_forums()
if 'scp-wiki' in self.wiki.site:
self._save_meta()
orm.queue.join()
self._save_cache()
orm.queue.join()
log.info('Snapshot succesfully taken.')
def _save_all_pages(self):
"""Iterate over the site pages, call _save_page for each."""
orm.create_tables(
'Page', 'Revision', 'Vote', 'ForumPost',
'PageTag', 'ForumThread', 'User', 'Tag')
count = next(
self.wiki.list_pages(body='total', limit=1))._body['total']
bar = utils.ProgressBar('SAVING PAGES'.ljust(20), int(count))
bar.start()
|
jaywink/shoop | shoop_tests/admin/test_qs.py | Python | agpl-3.0 | 691 | 0.001447 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 lice | nse found in the
# LICENSE file in the root directory of this source tree.
from shoop.admin.utils.urls import manipulate_query_string
def test_qs_manipulation():
url = "http://example.com/"
assert manipulate_query_string(url) == url # Noop works
hello_url = manipulate_query_string(url, q="hello", w="wello") # Adding works
assert "q=hello" in hello_url
assert "w=wello" in hello_url
unhello_url = manipulate_q | uery_string(hello_url, q=None) # Removal works
assert "w=wello" in unhello_url
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractWataboutMe.py | Python | bsd-3-clause | 597 | 0.030151 | def extractWataboutMe(item): |
'''
Parser for 'watabout.me'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Ben-To', | 'Ben-To', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
Tianyi94/EC601Project_Somatic-Parkour-Game-based-on-OpenCV | Old Code/ControlPart/FaceDetection+BackgroundReduction.py | Python | mit | 723 | 0.045643 | import numpy as np
import cv2
from matplotlib import pyplot as plt
face_ | cascade = cv2.CascadeClassifier('/home/tianyiz/user/601project/c/haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while 1:
ret, img = cap.read()
gray = cv2.cvtColor(img, | cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
#Background reduce
fgmask = fgbg.apply(img)
cv2.imshow('Reduce',fgmask)
for (x,y,w,h) in faces:
print(x,y,w,h)
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
cv2.imshow('img',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
davidhalter/depl | test/deploy/test_fab.py | Python | mit | 709 | 0 | import pytest
from ..helpers import config_file, main_run
@config_file('''
deploy:
- fab: |
run('touch depl_fab')
sudo("rm depl_fab")
''')
def test_no_error(tmpdir):
main_run(['depl', 'deploy', 'localhost'])
@config_file('''
deploy:
- fab: |
sudo("rm depl_fab")
''')
def test_raise_error(tmpdir):
with pytest.raises(SystemExit):
main_run(['depl', 'deploy', 'localhost'])
@config_file('''
deploy:
- fab: |
| with warn_only():
result = sudo("rm depl_fab")
assert result.failed
''')
def test_raise_error_but_quiet(tmpdir):
main_run(['depl', 'deploy', | 'localhost'])
|
madvas/gae-angular-material-starter | main/task.py | Python | mit | 3,654 | 0.004926 | # coding: utf-8
"""
Module for created app engine deferred tasks. Mostly sending emails
"""
import logging
import flask
from google.appengine.api import mail #pylint: disable=import-error
from google.appengine.ext import deferred #pylint: disable=import-er | ror
import config
import util
def send_mail_notification(subject, body, receiver=None, **kwargs):
"""Function for sending email via GAE's mail and deferred module
Args:
subject (string): Email subject
body (string): | Email body
receiver (string, optional): Email receiver, if omitted admin will send email himself
**kwargs: Arbitrary keyword arguments.
"""
if not config.CONFIG_DB.feedback_email:
return
brand_name = config.CONFIG_DB.brand_name
sender = '%s <%s>' % (brand_name, config.CONFIG_DB.feedback_email)
subject = '[%s] %s' % (brand_name, subject)
if config.DEVELOPMENT:
logging.info(
'\n'
'######### Deferring to send this email: #############################'
'\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n'
'#####################################################################'
, sender, receiver or sender, subject, body)
deferred.defer(mail.send_mail, sender, receiver or sender, subject, body, **kwargs)
def new_user_notification(user_db):
"""Sends notification to admin about newly registered user
To be this enabled, notify_on_new_user must be true in config database
Args:
user_db (model.User): newly registered user
"""
if not config.CONFIG_DB.notify_on_new_user:
return
body = 'name: %s\nusername: %s\nemail: %s\n%s\n%s' % (
user_db.name,
user_db.username,
user_db.email,
''.join([': '.join(('%s\n' % a).split('_')) for a in user_db.auth_ids]),
'%s#!/user/%s' % (flask.url_for('index', _external=True), user_db.username)
)
send_mail_notification('New user: %s' % user_db.name, body)
def reset_password_notification(user_db):
"""Sends email with url, which user can use to reset his password
Args:
user_db (model.User): User, who requested password reset
"""
if not user_db.email:
return
user_db.token = util.uuid()
user_db.put()
receiver = '%s <%s>' % (user_db.name, user_db.email)
body = '''Hello %(name)s,
it seems someone (hopefully you) tried to reset your password with %(brand)s.
In case it was you, please reset it by following this link:
%(link)s
If it wasn't you, we apologize. You can either ignore this email or reply to it
so we can take a look.
Best regards,
%(brand)s
''' % {
'name': user_db.name,
'link': flask.url_for('user_reset', token=user_db.token, _external=True),
'brand': config.CONFIG_DB.brand_name,
}
send_mail_notification('Reset your password', body, receiver)
def verify_user_email_notification(user_db):
"""Sends email, which user can use to verify his email address
Args:
user_db (model.User): user, who should verify his email
"""
if not user_db.email:
return
user_db.token = util.uuid()
user_db.put()
receiver = user_db.email
body = '''Welcome to %(brand)s.
Follow the link below to confirm your email address and activate your account:
%(link)s
If it wasn't you, we apologize. You can either ignore this email or reply to it
so we can take a look.
Best regards,
%(brand)s
''' % {
'link': flask.url_for('user_verify', token=user_db.token, _external=True),
'brand': config.CONFIG_DB.brand_name,
}
send_mail_notification('Verify your email', body, receiver)
|
wavefrontHQ/python-client | wavefront_api_client/models/integration_dashboard.py | Python | apache-2.0 | 7,471 | 0.000134 | # coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wa | vefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class IntegrationDashboard(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The ke | y is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'dashboard_min_obj': 'DashboardMin',
'dashboard_obj': 'Dashboard',
'description': 'str',
'name': 'str',
'url': 'str'
}
attribute_map = {
'dashboard_min_obj': 'dashboardMinObj',
'dashboard_obj': 'dashboardObj',
'description': 'description',
'name': 'name',
'url': 'url'
}
def __init__(self, dashboard_min_obj=None, dashboard_obj=None, description=None, name=None, url=None, _configuration=None): # noqa: E501
"""IntegrationDashboard - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._dashboard_min_obj = None
self._dashboard_obj = None
self._description = None
self._name = None
self._url = None
self.discriminator = None
if dashboard_min_obj is not None:
self.dashboard_min_obj = dashboard_min_obj
if dashboard_obj is not None:
self.dashboard_obj = dashboard_obj
self.description = description
self.name = name
self.url = url
@property
def dashboard_min_obj(self):
"""Gets the dashboard_min_obj of this IntegrationDashboard. # noqa: E501
:return: The dashboard_min_obj of this IntegrationDashboard. # noqa: E501
:rtype: DashboardMin
"""
return self._dashboard_min_obj
@dashboard_min_obj.setter
def dashboard_min_obj(self, dashboard_min_obj):
"""Sets the dashboard_min_obj of this IntegrationDashboard.
:param dashboard_min_obj: The dashboard_min_obj of this IntegrationDashboard. # noqa: E501
:type: DashboardMin
"""
self._dashboard_min_obj = dashboard_min_obj
@property
def dashboard_obj(self):
"""Gets the dashboard_obj of this IntegrationDashboard. # noqa: E501
:return: The dashboard_obj of this IntegrationDashboard. # noqa: E501
:rtype: Dashboard
"""
return self._dashboard_obj
@dashboard_obj.setter
def dashboard_obj(self, dashboard_obj):
"""Sets the dashboard_obj of this IntegrationDashboard.
:param dashboard_obj: The dashboard_obj of this IntegrationDashboard. # noqa: E501
:type: Dashboard
"""
self._dashboard_obj = dashboard_obj
@property
def description(self):
"""Gets the description of this IntegrationDashboard. # noqa: E501
Dashboard description # noqa: E501
:return: The description of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this IntegrationDashboard.
Dashboard description # noqa: E501
:param description: The description of this IntegrationDashboard. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def name(self):
"""Gets the name of this IntegrationDashboard. # noqa: E501
Dashboard name # noqa: E501
:return: The name of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IntegrationDashboard.
Dashboard name # noqa: E501
:param name: The name of this IntegrationDashboard. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def url(self):
"""Gets the url of this IntegrationDashboard. # noqa: E501
URL path to the JSON definition of this dashboard # noqa: E501
:return: The url of this IntegrationDashboard. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this IntegrationDashboard.
URL path to the JSON definition of this dashboard # noqa: E501
:param url: The url of this IntegrationDashboard. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(IntegrationDashboard, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IntegrationDashboard):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IntegrationDashboard):
return True
return self.to_dict() != other.to_dict()
|
compas-dev/compas | src/compas_ghpython/artists/artist.py | Python | mit | 300 | 0 | from __future__ import print_function
from __fu | ture__ import absolute_import
from __future__ import division
from compas.artists import Artist
class GHArtist(Artist):
"""Base class for all GH artists.
"""
def __init__(self, **kwargs):
super(GHArtist, self).__init__( | **kwargs)
|
SuperDARNCanada/placeholderOS | experiments/testing_archive/test_beamforming_16_boxes.py | Python | gpl-3.0 | 7,055 | 0.009639 | #!/usr/bin/python
import os
import sys
sys.path.append(os.environ['BOREALISPATH'])
# write an experiment that creates a new control program.
from experiment_prototype.experiment_prototype import ExperimentPrototype
class OneBox(ExperimentPrototype):
def __init__(self):
cpid = 100000000
super(OneBox, self).__init__(cpid)
pulse_sequence = [0, 14, 22, 24, 27, 31, 42, 43]
#pulse_sequence = [0,3,15,41,66,95,97,106,142,152,220,221,225,242,295,330,338,354,382,388,402,415,486,504,523,546,553]
self.add_slice({ # slice_id = 0, there is only one slice.
"tx_antennas": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"rx_main_antennas": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"rx_int_antennas": [0, 1, 2, 3],
"pulse_sequence":pulse_sequence,#[0, 14, 22, 24, 27, 31, 42, 43],
"pulse_shift": [0] * len(pulse_sequence),
"mpinc": 1500, # us
"pulse_len": 300, # us
"nrang": 75, # range gates
"frang": 180, # first range gate, in km
"intt": 3000, # duration of an integration, in ms
"intn": 21, # number of averages if intt is None.
"beam_angle": [-0.0], # [-26.25, -22.75, -19.25, -15.75, -12.25, -8.75,
# -5.25, -1.75, 1.75, 5.25, 8.75, 12.25, 15.75, 19.25, 22.75, 26.25],
"beam_order": [0],
#"scanboundflag": True, # there is a scan boundary
#"scanbound": 60000, # ms
"txfreq": 13332,
#"clrfrqflag": True, # search for clear frequency before transmitting
#"clrfrqrange": [13200, 13500], # frequency range for clear frequency search, kHz
# including a clrfrqrange overrides rxfreq and txfreq so these are no longer necessary
# as they will be set by the frequency chosen from the range.
"xcf": True, # cross-correlation processing
"acfint": True, # interferometer acfs
})
# USED THE FOLLOWING FOR TESTING SECOND SLICE
# self.add_slice({ # slice_id = 0, there is only one slice.
# "txantennas": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
# "rx_main_antennas": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
# "rx_int_antennas": [0, 1, 2, 3],
# "pulse_sequence": [0, 14, 22, 24, 27, 31, 42, 43],
# "pulse_shift": [0, 0, 0, 0, 0, 0, 0, 0],
# "mpinc": 1500, # us
# "pulse_len": 300, # us
# "nrang": 75, # range gates
# "frang": 180, # first range gate, in km
# "intt": 3000, # duration of an integration, in ms
# "intn": 21, # number of averages if intt is None.
# "beam_angle": [-26.25, -22.75, -19.25, -15.75, -12.25, -8.75,
# -5.25, -1.75, 1.75, 5.25, 8.75, 12.25, 15.75, 19.25, 22.75, 26.25],
# "beam_order": [15, 14, 13, 12, 11, 10, 9, 8, 7, | 6, 5, 4, 3, 2, 1, 0],
# "scanboundflag": True, # there is a scan boundary
# "scanbound": 60000, # ms
# "clrfrqflag": True, # search for clear frequency before transmitting
# "clrfrqrange": [13100, 13200], # frequency range for clear frequency search, kHz
# # including a clrfrqrange overrides rxfreq and txfreq so these are no | longer necessary
# # as they will be set by the frequency chosen from the range.
# "xcf": True, # cross-correlation processing
# "acfint": True, # interferometer acfs
# }, interfacing_dict={0: 'PULSE'})
# Other things you can change if you wish. You may want to discuss with us about it beforehand.
# These apply to the experiment and all slices as a whole.
#self.txctrfreq = 12000 # kHz, oscillator mixer frequency on the USRP for TX
# self.txrate = 12000000 # Hz, sample rate fed to DAC
#self.rxctrfreq = 12000 # kHz, mixer frequency on the USRP for RX
print(self.rxctrfreq)
"""
INTERFACING TYPES:
NONE : Only the default, must be changed.
SCAN : Scan by scan interfacing. Experiment slice 1 will scan first followed by slice 2 and subsequent slices.
INTTIME : integration time interfacing (full integration time of one sequence, then the next). Time/number of
sequences dependent on intt and intn in the slice. Effectively simultaneous scan interfacing, interleaving
each integration time in the scans. Slice 1 first inttime or beam direction will run followed by slice
2's first inttime, etc. If slice 1's len(beam_order) is greater than slice 2's, slice 2's last
integration will run and then all the rest of slice 1's will continue until the full scan is over.
Experiment slice 1 and 2 must have the same scan boundary, if any boundary.
INTEGRATION : pulse sequence or integration interfacing (one sequence of one slice, then the next). Experiment
slice 1 and 2 must have same intt and intn. Integrations will switch between one and the other slice until
time is up or the required number of averages is reached.
PULSE : Simultaneous sequence interfacing, pulse by pulse creates a single sequence. Experiment Slice 1 and 2
might have different frequencies and/or may have different pulse length, mpinc, sequence. They must also
have same len(scan), although they may use different directions in scan. They must have the same scan
boundary if any. A time offset between the pulses starting may be set (seq_timer in the slice). Slice 1
and 2 will have integrations that run at the same time.
"""
# Update the following interface dictionary if you have more than one slice dictionary in your slice_list.
# The keys in the interface dictionary correspond to the slice_ids of the slices in your slice_list.
# NOTE keys are as such: (0,1), (0,2), (1,2), NEVER includes (2,0) etc.
# self.interface.update({
# (0,1) : 'PULSE'
# })
# def update(self, acfdata):
"""
Use this function to change your experiment based on ACF data retrieved from the rx_signal_processing block.
This function is called after every integration period so that your experiment can be changed to adjust to
existing conditions. Talk to us if you have something specific in mind that you're not sure if you can
implement here.
:param acfdata ??? TBD
:rtype boolean
:return change_flag, indicating whether the experiment has changed or not. True = change has occurred.
""" # TODO update with how acfdata will be passed in
# TODO : docs about what can and cannot be changed. Warning about changing centre frequencies.
# change_flag = False
# return change_flag
|
SeanXP/Nao-Robot | python/language/set_Chinese.py | Python | gpl-2.0 | 1,345 | 0.018311 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
#################################################################
# Copyright (C) 2015 Sean Guo. All rights reserved.
#
# > File Name: < set_Chinese.py >
# > Author: < Sean Guo >
# > Mail: < iseanxp+code@gmail | .com >
# > Created Tim | e: < 2015/03/30 >
# > Last Changed:
# > Description:
#################################################################
from naoqi import ALProxy
robot_ip = "192.168.1.100"
robot_port = 9559 # default port : 9559
tts = ALProxy("ALTextToSpeech", robot_ip, robot_port)
#tts.setLanguage("English")
#tts.say("Hello, world! I am Nao robot!")
# 切换语言包需要较长时间,故尽量不要在程序运行时切换;
tts.setLanguage("Chinese")
tts.say("你好,我是闹机器人。")
tts.say("我可以说流利的绕口令:")
tts.say("打南边来了一个喇嘛,手里提着五斤鳎蚂,打北边来了一个哑巴,腰里别着一个喇叭")
tts.say("提搂鳎蚂的喇嘛要拿鳎蚂去换别着喇叭的哑巴的喇叭,别着喇叭的哑巴不愿意拿喇叭去换提搂鳎蚂的喇嘛的鳎蚂")
#tts.say('粉红墙上画凤凰,凤凰画在粉红墙。')
#tts.say(' 红凤凰、粉凤凰,红粉凤凰、花凤凰。')
#tts.say('红凤凰,黄凤凰,红粉凤凰,粉红凤凰,花粉花凤凰。')
|
zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/admin/instances/forms.py | Python | apache-2.0 | 3,554 | 0.000281 | # Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("New Host"),
help_text=_("Choose a Host to migrate to."))
disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
initial=False, required=False)
block_migration = forms.BooleanField(label=_("Block Migration"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
instance_id = initial.get('instance_id')
self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
initial=instance_id)
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.host_name,
host.host_name)
for host in hosts
if host.service.startswith('compute') and
host.host_name != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available.")))
return sorted(host_list)
def handle(self, request, data):
try:
block_migration = data['block_migration']
disk_over_commit = data['disk_over_commit']
api.nova.server_live_migrate(request,
data['instance_id'],
data['host'],
| block_migration=block_migration,
disk_over_commit=disk_over_commit)
msg = _('The instance is preparing the live migration '
'to host "%s".') % data['host']
messages.success(request, msg)
return True
except Exception:
msg = _('Failed to live migrate instance to '
'h | ost "%s".') % data['host']
redirect = reverse('horizon:admin:instances:index')
exceptions.handle(request, msg, redirect=redirect)
|
SteelToeOSS/Samples | pysteel/fs.py | Python | apache-2.0 | 296 | 0 | import os
import | shutil
import stat
def deltree(dirpath):
"""
:type dirpath: str
"""
if os.path.exists(dirpath):
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path) |
shutil.rmtree(dirpath, onerror=remove_readonly)
|
mvkirk/appengine-lumx-skeleton | app/server/main.py | Python | apache-2.0 | 766 | 0.002611 | import logging
| import os
import jinja2
import webapp2
from google.appengine.ext.webapp.util import run_wsgi_app
from server.service import Service
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(os.path.dirname(__file__))),
variable_start_strin | g='[[',
variable_end_string=']]'
)
class MainHandler(webapp2.RequestHandler):
def get(self):
template = jinja_environment.get_template('/client/front/index.html')
self.response.out.write(template.render({}))
application = webapp2.WSGIApplication([
('/', MainHandler),
('/service.*', Service)
], debug=True)
def main():
logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
little-dude/monolithe | tests/base/sdk/python/tdldk/v1_0/fetchers/galists_fetcher.py | Python | bsd-3-clause | 598 | 0.003344 | # -*- coding: | utf-8 -*-
#
# __code_header example
# put your license header here
# it will be added to all the generated files
#
from bambou import NURESTFetcher
class GAListsFetcher(NURESTFetcher):
""" Represents a GALists fetcher
Notes:
This fetcher enables to | fetch GAList objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return GAList class that is managed.
Returns:
.GAList: the managed class
"""
from .. import GAList
return GAList
|
John-Lin/SDN-hands-on | examples/simple_switch/simple_switch.py | Python | mit | 3,589 | 0 | from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class SimpleSwitch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
| actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
| |
clsb/miles | doc/conf.py | Python | mit | 9,457 | 0.006027 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# miles documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 | 17:10:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# | If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'miles'
copyright = '2016, Juan M. Bello-Rivas'
author = 'Juan M. Bello-Rivas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'miles v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'milesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'miles.tex', 'miles Documentation',
'Juan M. Bello-Rivas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after |
capstone-rust/capstone-rs | capstone-sys/capstone/suite/synctools/asmwriter.py | Python | mit | 33,268 | 0.002345 | #!/usr/bin/python
# convert LLVM GenAsmWriter.inc for Capstone disassembler.
# by Nguyen Anh Quynh, 2019
import sys
if len(sys.argv) == 1:
print("Syntax: %s <GenAsmWriter.inc> <Output-GenAsmWriter.inc> <Output-GenRegisterName.inc> <arch>" %sys.argv[0])
sys.exit(1)
arch = sys.argv[4]
f = open(sys.argv[1])
lines = f.readlines()
f.close()
f1 = open(sys.argv[2], 'w+')
f2 = open(sys.argv[3], 'w+')
f1.write("/* Capstone Disassembly Engine, http://www.capstone-engine.org */\n")
f1.write("/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2019 */\n")
f1.write("\n")
f2.write("/* Capstone Disassembly Engine, http://www.capstone-engine.org */\n")
f2.write("/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2013-2019 */\n")
f2.write("\n")
need_endif = False
in_getRegisterName = False
in_printAliasInstr = False
fragment_no = None
skip_printing = False
skip_line = 0
skip_count = 0
def replace_getOp(line):
line2 = line
if 'MI->getOperand(0)' in line:
line2 = line.replace('MI->getOperand(0)', 'MCInst_getOperand(MI, 0)')
elif 'MI->getOperand(1)' in line:
line2 = line.replace('MI->getOperand(1)', 'MCInst_getOperand(MI, 1)')
elif 'MI->getOperand(2)' in line:
line2 = line.replace('MI->getOperand(2)', 'MCInst_getOperand(MI, 2)')
elif 'MI->getOperand(3)' in line:
line2 = line.replace('MI->getOperand(3)', 'MCInst_getOperand(MI, 3)')
elif 'MI->getOperand(4)' in line:
line2 = line.replace('MI->getOperand(4)', 'MCInst_getOperand(MI, 4)')
elif 'MI->getOperand(5)' in line:
line2 = line.replace('MI->getOperand(5)', 'MCInst_getOperand(MI, 5)')
elif 'MI->getOperand(6)' in line:
line2 = line.replace('MI->getOperand(6)', 'MCInst_getOperand(MI, 6)')
elif 'MI->getOperand(7)' in line:
line2 = line.replace('MI->getOperand(7)', 'MCInst_getOperand(MI, 7)')
elif 'MI->getOperand(8)' in line:
line2 = line.replace('MI->getOperand(8)', 'MCInst_getOperand(MI, 8)')
return line2
def replace_getReg(line):
line2 = line
if 'MI->getOperand(0).getReg()' in line:
line2 = line.replace('MI->getOperand(0).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 0))')
elif 'MI->getOperand(1).getReg()' in line:
line2 = line.replace('MI->getOperand(1).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 1))')
elif 'MI->getOperand(2).getReg()' in line:
line2 = line.replace('MI->getOperand(2).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 2))')
elif 'MI->getOperand(3).getReg()' in line:
line2 = line.replace('MI->getOperand(3).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 3))')
elif 'MI->getOperand(4).getReg()' in line:
line2 = line.replace('MI->getOperand(4).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 4))')
elif 'MI->getOperand(5).getReg()' in line:
line2 = line.replace('MI->getOperand(5).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 5))')
elif 'MI->getOperand(6).getReg()' in line:
line2 = line.replace('MI->getOperand(6).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 6))')
elif 'MI->getOperand(7).getReg()' in line:
line2 = line.replace('MI->getOperand(7).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 7))')
elif 'MI->getOperand(8).getReg()' in line:
line2 = line.replace('MI->getOperand(8).getReg()', 'MCOperand_getReg(MCInst_getOperand(MI, 8))')
return line2
| # extract param between text()
# MRI.getRegClass(AArch64::GPR32spRegClassID).contains(MI->getOperand(1).getReg( | )))
def extract_paren(line, text):
i = line.index(text)
return line[line.index('(', i)+1 : line.index(')', i)]
# extract text between <>
# printSVERegOp<'q'>
def extract_brackets(line):
if '<' in line:
return line[line.index('<')+1 : line.index('>')]
else:
return ''
# delete text between <>, including <>
# printSVERegOp<'q'>
def del_brackets(line):
if '<' in line:
return line[:line.index('<')] + line[line.index('>') + 1:]
else:
return line
def print_line(line):
line = line.replace('::', '_')
line = line.replace('nullptr', 'NULL')
if not skip_printing:
if in_getRegisterName:
f2.write(line + "\n")
else:
f1.write(line + "\n")
for line in lines:
line = line.rstrip()
#print("@", line)
# skip Alias
if arch.upper() == 'X86':
if 'PRINT_ALIAS_INSTR' in line:
# done
break
if skip_line:
skip_count += 1
if skip_count <= skip_line:
# skip this line
continue
else:
# skip enough number of lines, reset counters
skip_line = 0
skip_count = 0
if "::printInstruction" in line:
if arch.upper() in ('AARCH64', 'ARM64'):
#print_line("static void printInstruction(MCInst *MI, SStream *O, MCRegisterInfo *MRI)\n{")
print_line("static void printInstruction(MCInst *MI, SStream *O)\n{")
else:
print_line("static void printInstruction(MCInst *MI, SStream *O)\n{")
elif 'const char *AArch64InstPrinter::' in line:
continue
elif 'getRegisterName(' in line:
if 'unsigned AltIdx' in line:
print_line("static const char *getRegisterName(unsigned RegNo, unsigned AltIdx)\n{")
else:
print_line("static const char *getRegisterName(unsigned RegNo)\n{")
elif 'getRegisterName' in line:
in_getRegisterName = True
print_line(line)
elif '::printAliasInstr' in line:
if arch.upper() in ('AARCH64', 'PPC'):
print_line("static char *printAliasInstr(MCInst *MI, SStream *OS, MCRegisterInfo *MRI)\n{")
print_line(' #define GETREGCLASS_CONTAIN(_class, _reg) MCRegisterClass_contains(MCRegisterInfo_getRegClass(MRI, _class), MCOperand_getReg(MCInst_getOperand(MI, _reg)))')
else:
print_line("static bool printAliasInstr(MCInst *MI, SStream *OS)\n{")
print_line(" unsigned int I = 0, OpIdx, PrintMethodIdx;")
print_line(" char *tmpString;")
in_printAliasInstr = True
elif 'STI.getFeatureBits()[' in line:
if arch.upper() == 'ARM':
line2 = line.replace('STI.getFeatureBits()[', 'ARM_getFeatureBits(MI->csh->mode, ')
elif arch.upper() == 'AARCH64':
line2 = line.replace('STI.getFeatureBits()[', 'AArch64_getFeatureBits(')
line2 = line2.replace(']', ')')
print_line(line2)
elif ', STI, ' in line:
line2 = line.replace(', STI, ', ', ')
if 'printSVELogicalImm<' in line:
if 'int16' in line:
line2 = line2.replace('printSVELogicalImm', 'printSVELogicalImm16')
line2 = line2.replace('<int16_t>', '')
elif 'int32' in line:
line2 = line2.replace('printSVELogicalImm', 'printSVELogicalImm32')
line2 = line2.replace('<int32_t>', '')
else:
line2 = line2.replace('printSVELogicalImm', 'printSVELogicalImm64')
line2 = line2.replace('<int64_t>', '')
if 'MI->getOperand(' in line:
line2 = replace_getOp(line2)
# C++ template
if 'printPrefetchOp' in line2:
param = extract_brackets(line2)
if param == '':
param = 'false'
line2 = del_brackets(line2)
line2 = line2.replace(', O);', ', O, %s);' %param)
line2 = line2.replace(', OS);', ', OS, %s);' %param)
elif '<false>' in line2:
line2 = line2.replace('<false>', '')
line2 = line2.replace(', O);', ', O, false);')
line2 = line2.replace('STI, ', '')
elif '<true>' in line:
line2 = line2.replace('<true>', '')
line2 = line2.replace(', O);', ', O, true);')
line2 = line2.replace('STI, ', '')
elif 'printAdrLabelOperand' in line:
# C++ template
if '<0>' in line:
line2 = line2.replace('<0>', '')
line2 = line2.replace(', O);', ', O, 0);')
elif '<1>' in line:
line2 = line2.replace('< |
sthenc/nc_packer | tools/chime_scorer.py | Python | mit | 13,171 | 0.037279 | #!/usr/bin/python
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('inset', choices=['test', 'val', 'train', 'all'])
parser.add_argument('recog', choices=['clean', 'reverb', 'noisy', 'retrain', 'all'])
phase_group = parser.add_mutually_exclusive_group(required = False)
phase_group.add_argument("-gen", "--just-generate",
help="Only generate features",
action="store_true")
phase_group.add_argument("-tst", "--just-test",
help="Only generate features",
action="store_true")
parser.add_argument("-cd", "--cuda",
help="Enable cuda",
action="store_true")
parser.add_argument("testid", help="String to generate necessary folders etc.") # can potentially delete data
parser.add_argument("netname", help="Input autosave file")
parser.add_argument("-del", "--delete",
help="Delete generated features to save space",
action="store_true")
args = parser.parse_args()
#print (args)
# create and change to test directory
rootdir = "/mnt/data/Fer/diplomski/training_currennt/speech_autoencoding_chime/test/" + args.testid + "/"
import shutil as sh
import os
if not os.path.exists(rootdir):
os.makedirs(rootdir)
os.chdir(rootdir)
# setup logging
import logging
logging.basicConfig(filename='chime_scorer.log', format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
logging.info("=" * 20 + "Program started" + "=" * 20)
logging.info("Arguments: " + str(args))
# copy selected network file to test directory
import subprocess as sb
netname = os.path.basename(args.netname)
#netname = netname.split('.')[0] + ".json"
print(netname, args.testid)
try:
#print(["cp", args.netname, rootdir])
tmp_output = sb.check_output(["cp", args.netname, rootdir], stderr=sb.STDOUT, universal_newlines=True)
except sb.CalledProcessError as exc:
logging.error("Invalid netname. returncode: " + str(exc.returncode) + " output: " + str(exc.output))
print("Invalid netname")
exit()
else:
log | ging.info("Copy netname: \n{}\n".format(tmp_output))
# feature generate phase
testfe | at = "output_test/"
valfeat = "output_val/"
trainfeat = "output_train/"
testfeatnorm = "output_norm_test/"
valfeatnorm = "output_norm_val/"
trainfeatnorm = "output_norm_train/"
testnc = "../../test_reverb_norm.nc"
valnc = "../../val_reverb_norm.nc"
trainnc = "../../train_reverb_norm.nc"
# for dubugging
#testnc = "../../dev2.nc"
#valnc = trainnc = "../../train2.nc"
for f in [testfeat, valfeat, trainfeat]:
if not os.path.exists(rootdir + f):
os.makedirs(rootdir + f)
logging.info("Created " + rootdir + f)
for f in [testnc, valnc, trainnc]:
if not os.path.isfile(rootdir + f):
logging.error("File doesn't exist: " + rootdir + f)
print("File doesn't exist: " + rootdir + f)
exit()
if args.delete:
for f in [testfeat, valfeat, trainfeat, testfeatnorm, valfeatnorm, trainfeatnorm]:
if os.path.exists(f):
sh.rmtree(f)
logging.info("Deleted temporary feature folders")
exit(0)
def clean_folder(foldername):
if os.path.exists(foldername):
sh.rmtree(foldername)
os.makedirs(foldername)
else:
os.makedirs(foldername) # should never happen
#network = autosave_run7_epoch138.autosave
#train = false
#input_noise_sigma = 0
#parallel_sequences = 2
#ff_output_format = htk
#ff_output_kind = 838
#feature_period = 10
#cuda = false
#revert_std = true
#ff_input_file = ../../test_reverb_norm.nc
#ff_output_file = ./output-test-138/
#print(os.path.basename(args.netname))
if args.cuda:
tmp_cuda = "true"
tmp_parallel = "100"
else:
tmp_cuda = "false"
tmp_parallel = "3"
command_template = ["currennt", "--network", "./" + netname, "--train","false",
"--input_noise_sigma", "0", "--parallel_sequences", tmp_parallel, "--ff_output_format", "htk", "--ff_output_kind", "838",
"--feature_period", "10", "--cuda", tmp_cuda, "--revert_std", "true"]
def generate_features(feat, nc):
try:
command = command_template[:];
command.extend(["--ff_input_file", nc, "--ff_output_file", "./" + feat])
logging.info("Command: " + str(command))
tmp_output = sb.check_output(command, stderr=sb.STDOUT, universal_newlines=True)
except sb.CalledProcessError as exc:
logging.error("Error generating features " + feat + " . returncode: " + str(exc.returncode) + " output: \n" + str(exc.output))
print("Error generating features " + feat + " ")
exit()
else:
logging.info("Generated features " + feat + " : \n{}\n".format(tmp_output))
rename_template = ["rename2mfcc.sh"]
def do_rename(feat):
try:
command = rename_template[:];
command.extend([feat])
logging.info("Command: " + str(command))
tmp_output = sb.check_output(command, stderr=sb.STDOUT, universal_newlines=True)
except sb.CalledProcessError as exc:
logging.error("Error renaming features " + feat + " . returncode: " + str(exc.returncode) + " output: \n" + str(exc.output))
print("Error renaming features " + feat + " ")
exit()
else:
logging.info("Renamed features " + feat + " : \n{}\n".format(tmp_output))
compute_template = ["compute_output_mean_stddev.py"]
def compute_means(feat, saved_means):
try:
command = compute_template[:];
command.extend([feat, saved_means])
logging.info("Command: " + str(command))
tmp_output = sb.check_output(command, stderr=sb.STDOUT, universal_newlines=True)
except sb.CalledProcessError as exc:
logging.error("Error computing means and stddevs of features " + feat + " . returncode: " + str(exc.returncode) + " output: \n" + str(exc.output))
print("Error computing means and stddevs of features " + feat + " ")
exit()
else:
logging.info("Computing means and stddevs of features " + feat + " : \n{}\n".format(tmp_output))
# Helper script that applies the saved means/stddevs to produce normalized
# feature files.
normalize_template = ["normalizer.py"]
def do_normalize(feat, saved_means, outfeat):
    """Normalize the features in `feat` with `saved_means`, writing into `outfeat`.

    Terminates the whole script on failure; logs the helper's output either way.
    """
    cmd = normalize_template + [feat, saved_means, outfeat]
    logging.info("Command: {}".format(cmd))
    try:
        cmd_output = sb.check_output(cmd, stderr=sb.STDOUT, universal_newlines=True)
    except sb.CalledProcessError as exc:
        logging.error("Error normalizing features {} . returncode: {} output: \n{}".format(feat, exc.returncode, exc.output))
        print("Error normalizing features {} ".format(feat))
        exit()
    else:
        logging.info("Normalized features {} : \n{}\n".format(feat, cmd_output))
def do_feature_work(feat, outfeat, nc, saved_means):
    # Full pipeline for one dataset: wipe the output directory, run the
    # currennt forward pass, rename the outputs to .mfcc, compute per-dim
    # means/stddevs, then write normalized copies into `outfeat`.
    # NOTE(review): assumes `rootdir` and `clean_folder` are defined earlier
    # in this script -- confirm.
    clean_folder(rootdir + feat)
    generate_features(feat, nc)
    do_rename(feat)
    compute_means(feat, saved_means)
    #sb.call(["htk_mfcc_visualize.py", feat + "0dB/10_bgakzn.mfcc"])
    do_normalize(feat, saved_means, outfeat)
    #sb.call(["htk_mfcc_visualize.py", outfeat + "0dB/10_bgakzn.mfcc"])
if not args.just_test:
logging.info("Started generating features")
if args.inset == "test" or args.inset == "all" :
feat = testfeat
outfeat = testfeatnorm
nc = testnc
saved_means = "./test_means.json"
do_feature_work(feat, outfeat, nc, saved_means)
if args.inset == "train" or args.inset == "all" :
feat = trainfeat
outfeat = trainfeatnorm
nc = trainnc
saved_means = "./train_means.json"
do_feat |
silly-wacky-3-town-toon/SOURCE-COD | toontown/estate/GardenDropGame.py | Python | apache-2.0 | 20,215 | 0.002721 | from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from direct.gui.DirectScrolledList import *
from direct.distributed.ClockDelta import *
from toontown.toontowngui import TTDialog
import math
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.toon import Toon
from direct.showbase import RandomNumGen
from toontown.toonbase import TTLocalizer
import random
import random
import cPickle
from direct.showbase import PythonUtil
import GameSprite
from math import pi
import GardenProgressMeter
class GardenDropGame(DirectObject.DirectObject):
    def __init__(self):
        """Set up the bubble-shooter style garden-drop mini-game board and start it."""
        self.acceptErrorDialog = None
        self.doneEvent = 'game Done'
        self.sprites = []
        self.load()
        thing = self.model.find('**/item_board')
        self.block = self.model1.find('**/minnieCircle')
        # RGBA color constants; the "ghost" variants are half-transparent.
        self.colorRed = (1, 0, 0, 1)
        self.colorBlue = (0, 0, 1, 1)
        self.colorGreen = (0, 1, 0, 1)
        self.colorGhostRed = (1, 0, 0, 0.5)
        self.colorGhostBlue = (0, 0, 1, 0.5)
        self.colorGhostGreen = (0, 1, 0, 0.5)
        self.colorWhite = (1, 1, 1, 1)
        self.colorBlack = (0, 0, 0, 1.0)
        self.colorShadow = (0, 0, 0, 0.5)
        self.lastTime = None
        self.running = 0
        self.massCount = 0
        self.foundCount = 0
        # Playfield bounds; new balls spawn at (newBallX, newBallZ).
        self.maxX = 0.47
        self.minX = -0.47
        self.maxZ = 0.65
        self.minZ = -0.1
        self.newBallX = 0.0
        self.newBallZ = 0.6
        self.rangeX = self.maxX - self.minX
        self.rangeZ = self.maxZ - self.minZ
        # Cell size; rows are vertically compressed (staggered hex-style grid).
        size = 0.085
        sizeZ = size * 0.8
        gX = int(self.rangeX / size)
        gZ = int(self.rangeZ / sizeZ)
        # Snap the playfield bounds to a whole number of cells.
        self.maxX = self.minX + gX * size
        self.maxZ = self.minZ + gZ * sizeZ
        self.controlOffsetX = 0.0
        self.controlOffsetZ = 0.0
        self.queExtent = 3
        print 'Grid Dimensions X%s Z%s' % (gX, gZ)
        # grid[x][z] = [sprite-or-None, worldX, worldZ]; even rows are shifted
        # half a cell to the right to form the staggered layout.
        self.grid = []
        self.gridDimX = gX
        self.gridDimZ = gZ
        self.gridBrick = False
        base.gardenGame = self
        for countX in range(self.gridDimX):
            newRow = []
            for countZ in range(self.gridDimZ):
                offset = 0
                if countZ % 2 == 0:
                    offset = size / 2
                newRow.append([None, countX * size + self.minX + offset, countZ * sizeZ + self.minZ])
            self.grid.append(newRow)
        self.controlSprite = None
        # Shadow "cog" sprite plus three randomly placed starter balls.
        self.cogSprite = self.addUnSprite(self.block, posX=0.25, posZ=0.5)
        self.cogSprite.setColor(self.colorShadow)
        for ball in range(0, 3):
            place = random.random() * self.rangeX
            newSprite = self.addSprite(self.block, size=0.5, posX=self.minX + place, posZ=0.0, found=1)
            self.stickInGrid(newSprite, 1)
        # The white "cue" ball the player aims with.
        self.queBall = self.addSprite(self.block, posX=0.25, posZ=0.5, found=0)
        self.queBall.setColor(self.colorWhite)
        self.queBall.isQue = 1
        self.matchList = []
        self.newBallTime = 1.0
        self.newBallCountUp = 0.0
        self.cogX = 0
        self.cogZ = 0
        self.__run()
        return
def findGrid(self, x, z, force = 0):
currentClosest = None
currentDist = 10000000
for countX in range(self.gridDimX):
for countZ in range(self.gridDimZ):
testDist = self.testPointDistanceSquare(x, z, self.grid[countX][countZ][1], self.grid[countX][countZ][2])
if self.grid[countX][countZ][0] == None and testDist < currentDist and (force or self.hasNeighbor(countX, countZ)):
currentClosest = self.grid[countX][countZ]
self.closestX = countX
self.closestZ = countZ
currentDist = testDist
return currentClosest
def hasNeighbor(self, cellX, cellZ):
gotNeighbor = 0
if cellZ % 2 == 0:
if self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ - 1)):
| gotNeighbor = 1
elif se | lf.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ - 1)):
gotNeighbor = 1
return gotNeighbor
def clearMatchList(self):
for entry in self.matchList:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
gridEntry[0] = None
sprite.markedForDeath = 1
return
    def createMatchList(self, x, z):
        # Rebuild self.matchList as the flood-filled cluster of same-colored
        # cells connected to grid cell (x, z).
        self.matchList = []
        self.fillMatchList(x, z)
def fillMatchList(self, cellX, cellZ):
if (cellX, cellZ) in self.matchList:
return
self.matchList.append((cellX, cellZ))
colorType = self.grid[cellX][cellZ][0].colorType
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX + 1, cellZ + 1) == colorType:
self.fillMatchList(cellX + 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX + 1, cellZ - 1) == colorType:
self.fillMatchList(cellX + 1, cellZ - 1)
else:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX - 1, cellZ + 1) == colorType:
self.fillMatchList(cellX - 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX - 1, cellZ - 1) == colorType:
self.fillMatchList(cellX - 1, cellZ - 1)
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
return
def getValidGrid(self, x, z):
if x < 0 or x >= self.gridDimX:
return None
elif z < 0 or z >= self.gridDimZ:
return None
else:
return self.grid[x][z]
return None
def getColorType(self, x, z):
if x < 0 or x >= self.gridDimX:
return -1
elif z < 0 or z >= self.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][ |
honeynet/beeswarm | beeswarm/drones/honeypot/tests/test_smtp.py | Python | gpl-3.0 | 7,214 | 0.003327 | # Copyright (C) 2012 Aniket Panse <contact@aniketpanse.in
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Aniket Panse <contact@aniketpanse.in> grants Johnny Vestergaard <jkv@unixcluster.dk>
# a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare derivative works of, publicly
# display, publicly perform, sublicense, relicense, and distribute [the] Contributions
# and such derivative works.
import smtplib
import base64
import hmac
import os
import tempfile
import shutil
import gevent.monkey
from gevent.server import StreamServer
from beeswarm.drones.honeypot.honeypot import Honeypot
from beeswarm.drones.honeypot.capabilities import smtp
gevent.monkey.patch_all()
import unittest
class SmtpTests(unittest.TestCase):
    """Integration tests for the honeypot's SMTP capability.

    Each test starts a real gevent StreamServer on an ephemeral port (port 0)
    and drives it with smtplib over the loopback interface.
    """

    def setUp(self):
        # Fresh scratch directory per test; Honeypot seeds it with the
        # environment the capability expects.
        self.work_dir = tempfile.mkdtemp()
        Honeypot.prepare_environment(self.work_dir)

    def tearDown(self):
        # Remove the per-test scratch directory.
        if os.path.isdir(self.work_dir):
            shutil.rmtree(self.work_dir)

    def test_connection(self):
        """ Tries to connect and run a EHLO command. Very basic test.
        """
        # Use uncommon port so that we can run test even if the Honeypot is running.
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'test'},
                   'users': {'test': 'test'}, }
        cap = smtp.smtp(options, self.work_dir)
        # Port 0 lets the OS pick a free ephemeral port (read back via server_port).
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()
        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        smtp_.ehlo()
        smtp_.quit()
        srv.stop()

    def test_AUTH_CRAM_MD5_reject(self):
        """ Makes sure the server rejects all invalid login attempts that use the
        CRAM-MD5 Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'someguy': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()

        def encode_cram_md5(challenge, user, password):
            # RFC 2195: response is "user hex(HMAC-MD5(password, challenge))",
            # base64 encoded.
            challenge = base64.decodestring(challenge)
            response = user + ' ' + hmac.HMAC(password, challenge).hexdigest()
            return base64.b64encode(response)

        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        _, resp = smtp_.docmd('AUTH', 'CRAM-MD5')
        # 'test' is not a configured user, so authentication must fail with 535.
        code, resp = smtp_.docmd(encode_cram_md5(resp, 'test', 'test'))
        self.assertEqual(code, 535)
        srv.stop()

    def test_AUTH_PLAIN_reject(self):
        """ Makes sure the server rejects all invalid login attempts that use the PLAIN Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'someguy': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()
        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        # RFC 4616 PLAIN: "\0user\0password" (empty authzid), base64 encoded.
        arg = '\0%s\0%s' % ('test', 'test')
        code, resp = smtp_.docmd('AUTH', 'PLAIN ' + base64.b64encode(arg))
        self.assertEqual(code, 535)
        srv.stop()

    def test_AUTH_LOGIN_reject(self):
        """ Makes sure the server rejects all invalid login attempts that use the LOGIN Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'someguy': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()
        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        # LOGIN sends username and password as separate base64-encoded lines.
        smtp_.docmd('AUTH', 'LOGIN')
        smtp_.docmd(base64.b64encode('test'))
        code, resp = smtp_.docmd(base64.b64encode('test'))
        self.assertEqual(code, 535)
        srv.stop()

    def test_AUTH_CRAM_MD5(self):
        """ Makes sure the server accepts valid login attempts that use the CRAM-MD5 Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'test': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()

        def encode_cram_md5(challenge, user, password):
            # RFC 2195: response is "user hex(HMAC-MD5(password, challenge))",
            # base64 encoded.
            challenge = base64.decodestring(challenge)
            response = user + ' ' + hmac.HMAC(password, challenge).hexdigest()
            return base64.b64encode(response)

        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        _, resp = smtp_.docmd('AUTH', 'CRAM-MD5')
        # 'test'/'test' is a configured user, so expect 235 (auth successful).
        code, resp = smtp_.docmd(encode_cram_md5(resp, 'test', 'test'))
        self.assertEqual(code, 235)
        srv.stop()

    def test_AUTH_PLAIN(self):
        """ Makes sure the server accepts valid login attempts that use the PLAIN Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'test': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()
        smtp_ = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        arg = '\0%s\0%s' % ('test', 'test')
        code, resp = smtp_.docmd('AUTH', 'PLAIN ' + base64.b64encode(arg))
        self.assertEqual(code, 235)
        srv.stop()

    def test_AUTH_LOGIN(self):
        """ Makes sure the server accepts valid login attempts that use the LOGIN Authentication method.
        """
        options = {'enabled': 'True', 'port': 0, 'protocol_specific_data': {'banner': 'Test'},
                   'users': {'test': 'test'}}
        cap = smtp.smtp(options, self.work_dir)
        srv = StreamServer(('0.0.0.0', 0), cap.handle_session)
        srv.start()
        smtp_client = smtplib.SMTP('127.0.0.1', srv.server_port, local_hostname='localhost', timeout=15)
        smtp_client.docmd('AUTH', 'LOGIN')
        smtp_client.docmd(base64.b64encode('test'))
        code, resp = smtp_client.docmd(base64.b64encode('test'))
        self.assertEqual(code, 235)
        srv.stop()
# Allow running this test module directly (python test_smtp.py).
if __name__ == '__main__':
    unittest.main()
|
djanderson/equery | pym/gentoolkit/eshowkw/keywords_header.py | Python | gpl-2.0 | 3,555 | 0.029817 | # vim:fileencoding=utf-8
# Copyright 2001-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['keywords_header']
from portage import settings as ports
from portage.output import colorize
from display_pretty import colorize_string
from display_pretty import align_string
class keywords_header:
	"""Builds the column header (keyword/arch columns plus extra fields) for eshowkw."""

	# Architectures highlighted (dark yellow) as particularly important.
	__IMPARCHS = [ 'arm', 'amd64', 'x86' ]
	# Additional per-package fields shown after the keyword columns (2nd part).
	__ADDITIONAL_FIELDS = [ 'unused', 'slot' ]
	# Extra fields rendered in a separate trailing section (3rd part).
	__EXTRA_FIELDS = [ 'repo' ]

	def __readKeywords(self):
		"""Read all available keywords from portage (dropping ~arch variants)."""
		return [x for x in ports.archlist()
			if not x.startswith('~')]

	def __sortKeywords(self, keywords, prefix = False, required_keywords = None):
		"""Sort keywords with short (non-prefix) archs first.

		If required_keywords is non-empty, restrict the output to those keywords
		(falling back to the full list when none of them exist). Prefix archs
		(containing '-') are appended only when `prefix` is True.

		Bug fix: required_keywords previously defaulted to a shared mutable []
		instance; it now defaults to None (behaviour unchanged).
		"""
		# The user may have specified only some keywords to display.
		if required_keywords:
			tmpkeywords = [k for k in keywords
				if k in required_keywords]
			# Fall back to everything if only non-existent archs were given.
			if tmpkeywords:
				keywords = tmpkeywords
		normal = [k for k in keywords
			if len(k.split('-')) == 1]
		normal.sort()
		if prefix:
			longer = [k for k in keywords
				if len(k.split('-')) != 1]
			longer.sort()
			normal.extend(longer)
		return normal

	def __readAdditionalFields(self):
		"""Prepare the list of additional fields displayed by eshowkw (2nd part)."""
		return self.__ADDITIONAL_FIELDS

	def __readExtraFields(self):
		"""Prepare the list of extra fields displayed by eshowkw (3rd part)."""
		return self.__EXTRA_FIELDS

	def __formatKeywords(self, keywords, align, length):
		"""Align keywords and color the important archs."""
		tmp = []
		for keyword in keywords:
			plain = keyword
			keyword = align_string(keyword, align, length)
			# '%' is used as a separator for a later split so we don't lose
			# spaces and coloring.
			keyword = '%'.join(list(keyword))
			if plain in self.__IMPARCHS:
				tmp.append(colorize_string('darkyellow', keyword))
			else:
				tmp.append(keyword)
		return tmp

	def __formatAdditional(self, additional, align, length):
		"""Align additional items properly."""
		# '%' is used as a separator for a later split so we don't lose
		# spaces and coloring.
		return ['%'.join(align_string(x, align, length)) for x in additional]

	def __prepareExtra(self, extra, align, length):
		"""Build the extra-fields column: a separator line plus the aligned items."""
		content = []
		content.append(''.ljust(length, '-'))
		content.extend(self.__formatAdditional(extra, align, length))
		return content

	def __prepareResult(self, keywords, additional, align, length):
		"""Merge keywords and additional fields into one list with separators."""
		content = []
		content.append(''.ljust(length, '-'))
		content.extend(self.__formatKeywords(keywords, align, length))
		content.append(''.ljust(length, '-'))
		content.extend(self.__formatAdditional(additional, align, length))
		return content

	def __init__(self, prefix = False, required_keywords = None, keywords_align = 'bottom'):
		"""Initialize keywords header.

		Bug fix: required_keywords previously defaulted to a shared mutable []
		instance; it now defaults to None (behaviour unchanged).
		"""
		if required_keywords is None:
			required_keywords = []
		additional = self.__readAdditionalFields()
		extra = self.__readExtraFields()
		self.keywords = self.__sortKeywords(self.__readKeywords(), prefix, required_keywords)
		# The widest string across all three sections sets the column width.
		self.length = max(
			max([len(x) for x in self.keywords]),
			max([len(x) for x in additional]),
			max([len(x) for x in extra])
		)
		self.keywords_count = len(self.keywords)
		self.additional_count = len(additional)
		self.extra_count = len(extra)
		self.content = self.__prepareResult(self.keywords, additional, keywords_align, self.length)
		self.extra = self.__prepareExtra(extra, keywords_align, self.length)
|
twitter/pants | src/python/pants/backend/python/tasks/setup_py.py | Python | apache-2.0 | 30,450 | 0.010213 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import io
import itertools
import os
import shutil
import textwrap
from abc import abstractmethod
from builtins import bytes, map, object, str, zip
from collections import defaultdict
from pex.installer import Packager, WheelInstaller
from pex.interpreter import PythonInterpreter
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.chroot import Chroot
from pants.backend.python.subsystems.pex_build_util import is_local_python_dist
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.base.specs import SiblingAddresses
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.resources import Resources
from pants.task.task import Task
from pants.util.collections_abc_backport import Iterable, Mapping, MutableSequence, OrderedDict, Set
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_concurrent_creation, safe_rmtree, safe_walk
from pants.util.memo import memoized_property
from pants.util.meta import AbstractClass
from pants.util.process_handler import subprocess
from pants.util.strutil import ensure_binary, ensure_text, safe_shlex_split
SETUP_BOILERPLATE = """
# DO NOT EDIT THIS FILE -- AUTOGENERATED BY PANTS
# Target: {setup_target}
from setuptools import setup
setup(**{setup_dict})
"""
# Distutils does not support unicode strings in setup.py, so we must explicitly convert to binary
# strings as pants uses unicode_literals. A natural and prior technique was to use `pprint.pformat`,
# but that embeds u's in the string itself during conversion. For that reason we roll out own
# literal pretty-printer here.
#
# For more information, see http://bugs.python.org/issue13943
def distutils_repr(obj):
  """Pretty-print `obj` as a literal suitable for embedding in setup.py.

  Like pprint.pformat, but emits plain (u-/b-prefix free) string literals,
  since distutils cannot handle unicode-prefixed reprs in setup.py
  (see http://bugs.python.org/issue13943).
  """
  buf = io.StringIO()
  newline = os.linesep

  def emit(text):
    buf.write(ensure_text(text))

  def render(value, indent=False, level=0):
    pad = '    ' * level
    if indent:
      emit(pad)
    level += 1
    if isinstance(value, (bytes, str)):
      # Hand-rolled string repr to avoid the u'...'/b'...' prefixes that
      # repr() would produce on py2/py3.
      if newline in value:
        emit('"""{}"""'.format(ensure_text(value.replace('"""', r'\"\"\"'))))
      else:
        emit("'{}'".format(ensure_text(value.replace("'", r"\'"))))
    elif isinstance(value, Mapping):
      emit('{' + newline)
      for key, val in value.items():
        render(key, indent=True, level=level)
        emit(': ')
        render(val, indent=False, level=level)
        emit(',' + newline)
      emit(pad + '}')
    elif isinstance(value, Iterable):
      # Pick the literal delimiters by concrete collection kind.
      if isinstance(value, MutableSequence):
        opener, closer = '[', ']'
      elif isinstance(value, Set):
        opener, closer = '{', '}'
      else:
        opener, closer = '(', ')'
      emit(opener + newline)
      for item in value:
        render(item, indent=True, level=level)
        emit(',' + newline)
      emit(pad + closer)
    else:
      emit(repr(value))  # Numbers and bools.

  render(obj)
  return buf.getvalue()
class SetupPyRunner(WheelInstaller):
  # We extend WheelInstaller to make sure `setuptools` and `wheel` are available to setup.py.

  def __init__(self, source_dir, setup_command, **kw):
    # :param source_dir: Directory containing the setup.py to run.
    # :param setup_command: The setup.py command line (e.g. 'sdist') to execute.
    self._setup_command = setup_command
    super(SetupPyRunner, self).__init__(source_dir, **kw)

  def setup_command(self):
    # Overrides the parent hook to run the caller-supplied command instead of
    # the default wheel build.
    return self._setup_command
class TargetAncestorIterator(object):
  """Supports iteration of target ancestor lineages."""

  def __init__(self, build_graph):
    self._build_graph = build_graph

  def iter_target_siblings_and_ancestors(self, target):
    """Produces an iterator over a target's siblings and ancestor lineage.

    :returns: A target iterator yielding the target and its siblings and then its ancestors from
              nearest to furthest removed.
    """
    def targets_registered_under(spec_path):
      # A spec path may have no addresses registered under it at all and
      # that's ok.  For example, with `a:a` and `a/b/c:c`, the path `a/b`
      # contains no addresses.
      try:
        for address in self._build_graph.inject_specs_closure([SiblingAddresses(spec_path)]):
          yield self._build_graph.get_target(address)
      except AddressLookupError:
        pass

    # Walk from the target's own directory up to the build root, yielding the
    # targets registered at each level (iterative form of the ancestor walk).
    spec_path = target.address.spec_path
    while True:
      for sibling in targets_registered_under(spec_path):
        yield sibling
      parent_spec_path = os.path.dirname(spec_path)
      if parent_spec_path == spec_path:
        break
      spec_path = parent_spec_path
# TODO(John Sirois): Get jvm and python publishing on the same page.
# Either python should require all nodes in an exported target closure be either exported or
# 3rdparty or else jvm publishing should use an ExportedTargetDependencyCalcul | ator to aggregate
# un-exported non-3rdparty interior nodes as needed. It seems like the latter is preferable since
# it can be used with a BUILD graph validator requiring completely exported subgraphs to enforce the
# former as a matter of local repo policy.
class ExportedTargetDependencyCalculator(AbstractClass):
"""Calculates the dependencies of exported targets.
When a target is exported many of its internal transitive library dependencies may be satisfied by
other int | ernal targets that are also exported and "own" these internal transitive library deps.
In other words, exported targets generally can have reduced dependency sets and an
`ExportedTargetDependencyCalculator` can calculate these reduced dependency sets.
To use an `ExportedTargetDependencyCalculator` a subclass must be created that implements two
predicates and a walk function for the class of targets in question. For example, a
`JvmDependencyCalculator` would need to be able to identify jvm third party dependency targets,
and local exportable jvm library targets. In addition it would need to define a walk function
that knew how to walk a jvm target's dependencies.
"""
class UnExportedError(TaskError):
"""Indicates a target is not exported."""
class NoOwnerError(TaskError):
"""Indicates an exportable target has no owning exported target."""
class AmbiguousOwnerError(TaskError):
"""Indicates an exportable target has more than one owning exported target."""
def __init__(self, build_graph):
self._ancestor_iterator = TargetAncestorIterator(build_graph)
@abstractmethod
def requires_export(self, target):
"""Identifies targets that need to be exported (are internal targets owning source code).
:param target: The target to identify.
:returns: `True` if the given `target` owns files that should be included in exported packages
when the target is a member of an exported target's dependency graph.
"""
@abstractmethod
def is_exported(self, target):
"""Identifies targets of interest that are exported from this project.
:param target: The target to identify.
:returns: `True` if the given `target` represents a top-level target exported from this project.
"""
@abstractmethod
def dependencies(self, target):
"""Returns an iterator |
jcu-eresearch/TDH-rich-data-capture | jcudc24provisioning/controllers/ca_schema_scripts.py | Python | bsd-3-clause | 11,701 | 0.005811 | """
alters the ColanderAlchemy generated schema to add additional display options including:
- Removing the top level title (ColanderAlchemy outputs a schema with a name at the top of every form).
- Removing items not on the specified page (based on attributes on schema nodes).
- Allowing form elements to be required.
- Grouping of nodes under a MappingSchema for display purposes.
- Prevent duplicate field names causing problems by adding parent schema names separated by :
- Removal of advanced/restricted fields based on a passed in parameter (this could be upgraded to integrate with the
Pyramid permissions system).
- There is also a fix_schema_field_name) method that reverts names to their original value.
"""
import ast
from collections import OrderedDict
from datetime import date
from beaker.cache import cache_region
import colander
from colanderalchemy.types import SQLAlchemyMapping
import deform
import os
import logging
from pyramid.security import has_permission
#from jcudc24provisioning.models.project import DatasetDataSource, SOSDataSource, PushDataSource, PullDataSource, FormDataSource
logger = logging.getLogger(__name__)
__author__ = 'Casey Bajema'
def fix_schema_field_name(field_name):
    """
    Strip the parent form element names (separated by ':') that were prepended
    to prevent duplicate form names, recovering the model attribute name.
    :param field_name: Name that is used on the output HTML forms.
    :return: model attribute name.
    """
    return field_name.rsplit(":", 1)[-1]
def convert_schema(schema, restrict_admin=False, **kw):
    """
    Convert the default ColanderAlchemy schema to include the required additional features:

    - Removes the top level title (ColanderAlchemy outputs a schema with a name at the
      top of every form).
    - Removes items not on the specified page (pass page=<value> as a keyword argument).
    - Allows form elements to be required.
    - Groups nodes between ca_group_start and ca_group_end attributes under a
      MappingSchema for display purposes.
    - Prevents duplicate field names by prepending parent schema names separated by ':'
      (reversible via fix_schema_field_name()).
    - Optionally removes advanced/restricted fields (ca_requires_admin=True).

    :param schema: original ColanderAlchemy schema
    :param restrict_admin: should fields marked as restricted (ca_requires_admin=True) be removed?
    :param kw: additional parameters; 'page' selects which page's nodes to keep.
    :return: modified schema with the desired alterations.
    """
    schema.title = ''
    # Remove elements whose `page` attribute doesn't match the requested page.
    # ('page' in kw replaces the Python2-only kw.has_key('page').)
    if 'page' in kw:
        schema = _remove_nodes_not_on_page(schema, kw.pop('page'))
    # Make fields required (ColanderAlchemy removes the ability to have required fields).
    schema = _force_required(schema)
    # Wrap elements between ca_group_start and ca_group_end attributes with a
    # MappingSchema (fixes the accidental double assignment `schema = schema = ...`).
    schema = _group_nodes(schema)
    # Prepend each element's name with '<parent element name>:'.
    schema = _prevent_duplicate_fields(schema)
    # Remove fields marked ca_requires_admin=True when requested.
    if restrict_admin:
        schema = _remove_admin_fields(schema)
    return schema
@cache_region('long_term')
def _remove_admin_fields(schema):
    """
    Remove fields that are marked as ca_requires_admin=True.
    :param schema: schema to remove elements from
    :return: updated schema
    """
    denied = []
    for child in schema.children:
        if getattr(child, 'requires_admin', False):
            denied.append(child)
        elif child.children:
            # Recurse into mappings/sequences that survive the cut.
            _remove_admin_fields(child)
    # Remove after iterating so we don't mutate the list we are walking.
    for child in denied:
        schema.children.remove(child)
    return schema
@cache_region('long_term')
def _prevent_duplicate_fields(schema):
    """
    Prepend each child's name with '<parent schema name>:' so nested nodes
    cannot collide; reversed later by fix_schema_field_name().
    :param schema: schema to update
    :return: updated schema
    """
    for child in schema.children:
        child.name = "%s:%s" % (schema.name, child.name)
        if isinstance(child.typ, colander.Sequence):
            # Sequences wrap their item schema in children[0]; rename inside it.
            _prevent_duplicate_fields(child.children[0])
        elif child.children:
            _prevent_duplicate_fields(child)
    return schema
@cache_region('long_term')
def _force_required(schema):
    """
    Make fields required again (ColanderAlchemy removes the ability to have
    required fields): nodes flagged force_required=True get `missing` reset to
    colander's marker, nodes flagged force_required=False fall back to their default.
    :param schema: schema to update
    :return: updated schema
    """
    for child in schema.children:
        if child.children:
            _force_required(child)
        if not hasattr(child, 'force_required'):
            continue
        if child.force_required:
            child.missing = colander._marker
        else:
            child.missing = child.default
    return schema
def _remove_nodes_not_on_page(schema, page):
    """
    Remove elements whose `page` attribute is set and differs from `page`.
    :param schema: schema to update
    :param page: page value that is allowed; children with any other page value are removed.
    :return: updated schema
    """
    doomed = []
    for child in schema.children:
        if child.children:
            _remove_nodes_not_on_page(child, page)
        # Children without a `page` attribute are kept unconditionally.
        if getattr(child, 'page', page) != page:
            doomed.append(child)
    # Remove after iterating so we don't mutate the list we are walking.
    for child in doomed:
        schema.children.remove(child)
    return schema
def _fix_sequence_schemas(sequence_node):
"""
Sequence schemas have some display problems that ca_child_... elements are used to fix.
Some other problems include displaying of labels when there is only 1 element (which looks odd).
:param sequence_node: sequence item to fix/update
:return: None
"""
# Set the childs widget if ca_child_widget has been set on the sequence (I can't see any other way to do it)
for attr in sequence_node.__dict__:
if attr[:6] == "child_":
setattr(sequence_node.children[0], attr[6:], sequence_node.__dict__[attr])
# if hasattr(child, "child_widget"):
# child.children[0].widget = child.child_widget
# If there is only 1 displayed child, hide the labels etc so that the item looks like a list
only_one_displayed = True
displayed_child = None
for sub_child in sequence_node.children[0].children:
if not isinstance(sub_child.widget, deform.widget.HiddenWidget):
if displayed_child:
only_one_displayed = False
continue
displayed_child = sub_child
if only_one_displayed and displayed_child:
if sequence_node.children[0].widget is None:
sequence_node.children[0].widget = deform.widget.MappingWidget(template="ca_sequence_mapping",
readonly_template="readonly/ca_sequence_mapping", item_template="ca_sequence_mapping_item",
readonly_item_template="readonly/ca_sequence_mapping_item")
else:
if sequence_node.children[0].widget.template == "mapping":
sequence_node.children[0].widget.template = "ca_sequence_mapping"
if sequence_node.children[0].widget.readonly_template == "readonly/mapping":
sequence_node.children[0].widget.readonly_template = "readonly/ca_sequence_mapping"
if sequence_node.children[0].widget.item_template == "mapping_item":
sequence_node.children[0].widget.item_template = "ca_sequence_mapping_item"
if sequence |
hunch/hunch-gift-app | django/conf/locale/lv/formats.py | Python | mit | 1,226 | 0.004898 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Date/time format definitions for the Latvian (lv) locale.  Backslash
# sequences such as \g\a\d\a escape literal characters ("gada") inside
# Django date-format strings.
DATE_FORMAT = r'Y. \g\a\d\a j. F'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'Y. \g\a\d\a j. F, H:i:s'
YEAR_MONTH_FORMAT = r'Y. \g. F'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = r'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday
# Input formats are Python strptime patterns, tried in order.
# (Identifier restored: the source line was garbled to "DATE_ | INP | UT_FORMATS".)
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',  # '14:30'
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S',  # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M',  # '25.10.06 14.30'
    '%d.%m.%y',  # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
tobegit3hub/cinder_docker | cinder/tests/unit/api/contrib/test_services.py | Python | apache-2.0 | 24,584 | 0 | # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from iso8601 import iso8601
from oslo_utils import timeutils
import webob.exc
from cinder.api.contrib import services
from cinder.api import extensions
from cinder import context
from cinder import db
from cinder import exception
from cinder import policy |
from cinder import test
from cinder.tests.unit.api import fakes
# Canned service rows mirroring the columns the services API extension
# reads.  Covers enabled/disabled, up/down (stale updated_at), and
# frozen/None timestamps.  NOTE: ids 6 is intentionally duplicated by the
# original fixture.  (The 'created_at' key of the first row was garbled by
# extraction and has been restored.)
fake_services_list = [
    {'binary': 'cinder-scheduler',
     'host': 'host1',
     'availability_zone': 'cinder',
     'id': 1,
     'disabled': True,
     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
     'disabled_reason': 'test1',
     'modified_at': ''},
    {'binary': 'cinder-volume',
     'host': 'host1',
     'availability_zone': 'cinder',
     'id': 2,
     'disabled': True,
     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
     'disabled_reason': 'test2',
     'modified_at': ''},
    {'binary': 'cinder-scheduler',
     'host': 'host2',
     'availability_zone': 'cinder',
     'id': 3,
     'disabled': False,
     'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
     'disabled_reason': '',
     'modified_at': ''},
    {'binary': 'cinder-volume',
     'host': 'host2',
     'availability_zone': 'cinder',
     'id': 4,
     'disabled': True,
     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
     'disabled_reason': 'test4',
     'modified_at': ''},
    {'binary': 'cinder-volume',
     'host': 'host2',
     'availability_zone': 'cinder',
     'id': 5,
     'disabled': True,
     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
     'disabled_reason': 'test5',
     'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
    {'binary': 'cinder-volume',
     'host': 'host2',
     'availability_zone': 'cinder',
     'id': 6,
     'disabled': False,
     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
     'disabled_reason': '',
     'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)},
    {'binary': 'cinder-scheduler',
     'host': 'host2',
     'availability_zone': 'cinder',
     'id': 6,
     'disabled': False,
     'updated_at': None,
     'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
     'disabled_reason': '',
     'modified_at': None},
    ]
class FakeRequest(object):
    """Minimal request stub: admin context, no query parameters."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithService(object):
    """Request stub filtering by the (deprecated) ``service`` query key."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"service": "cinder-volume"}
class FakeRequestWithBinary(object):
    """Request stub filtering by the ``binary`` query key."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"binary": "cinder-volume"}
class FakeRequestWithHost(object):
    """Request stub filtering by the ``host`` query key."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1"}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithHostService(object):
    """Request stub filtering by ``host`` plus the deprecated ``service``."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1", "service": "cinder-volume"}
class FakeRequestWithHostBinary(object):
    """Request stub filtering by both ``host`` and ``binary``."""
    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1", "binary": "cinder-volume"}
def fake_service_get_all(context, filters=None):
    """Stub for db.service_get_all: return the canned service rows.

    ``filters`` is accepted for signature compatibility but ignored.
    """
    return fake_services_list
def fake_service_get_by_host_binary(context, host, binary):
    """Return the fake service row matching *host* and *binary*, or None."""
    matches = (s for s in fake_services_list
               if s['host'] == host and s['binary'] == binary)
    return next(matches, None)
def fake_service_get_by_id(value):
    """Return the fake service row whose ``id`` equals *value*, or None."""
    matches = (s for s in fake_services_list if s['id'] == value)
    return next(matches, None)
def fake_service_update(context, service_id, values):
    """Stub for db.service_update.

    Raises ServiceNotFound for unknown ids.  A successful update is a
    no-op: the tests only exercise the not-found behaviour.  (The
    original else branch built a throwaway dict literal, which had no
    effect and has been removed.)
    """
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
def fake_policy_enforce(context, action, target):
    """Stub for policy.enforce that permits every action."""
    pass
def fake_utcnow(with_timezone=False):
    """Return a fixed 'now' of 2012-10-29 13:42:11 for deterministic tests.

    When *with_timezone* is true the result is UTC-aware, otherwise naive.
    """
    if with_timezone:
        return datetime.datetime(2012, 10, 29, 13, 42, 11,
                                 tzinfo=iso8601.Utc())
    return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update", fake_service_update)
self.stubs.Set(policy, "enforce", fake_policy_enforce)
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = services.ServiceController(self.ext_mgr)
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)},
|
plotly/plotly.py | packages/python/plotly/plotly/validators/layout/mapbox/layer/line/_dash.py | Python | mit | 421 | 0.002375 | import _plotly_utils.basevalidators
class DashValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``dash`` property of ``layout.mapbox.layer.line``.

    (Two lines of this generated class were garbled by extraction and
    have been restored to valid Python.)
    """

    def __init__(
        self, plotly_name="dash", parent_name="layout.mapbox.layer.line", **kwargs
    ):
        super(DashValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs
        )
|
elbeardmorez/quodlibet | quodlibet/quodlibet/ext/events/inhibit.py | Python | gpl-2.0 | 2,526 | 0 | # -*- coding: utf-8 -*-
# Copyright 2011 Christoph Reiter <reiter.christoph@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
if os.name == "nt" or sys.platform == "darwin":
from quodlibet.plugins import PluginNotSupportedError
raise PluginNotSupportedError
import dbus
from quodlibet import _
from quodlibet import app
from quodlibet.qltk import Icons
from quodlibet.plugins.events import EventPlugin
def get_toplevel_xid():
    """Return the X11 window id of the main window, or 0 if unavailable."""
    # Fetch the GdkWindow once instead of calling get_window() twice.
    window = app.window.get_window()
    if window:
        try:
            return window.get_xid()
        except AttributeError:  # non x11
            pass
    return 0
class InhibitFlags(object):
    """Bitmask flags accepted by org.gnome.SessionManager.Inhibit."""

    LOGOUT = 1 << 0
    USERSWITCH = 1 << 1
    SUSPEND = 1 << 2
    IDLE = 1 << 3
class SessionInhibit(EventPlugin):
    """Inhibit the GNOME screensaver via org.gnome.SessionManager
    while a song is playing.

    (Two lines were garbled by extraction — the get_toplevel_xid() call
    and the Inhibit() argument list — and have been restored.)
    """

    PLUGIN_ID = "screensaver_inhibit"
    PLUGIN_NAME = _("Inhibit Screensaver")
    PLUGIN_DESC = _("Prevents the GNOME screensaver from activating while"
                    " a song is playing.")
    PLUGIN_ICON = Icons.PREFERENCES_DESKTOP_SCREENSAVER

    DBUS_NAME = "org.gnome.SessionManager"
    DBUS_INTERFACE = "org.gnome.SessionManager"
    DBUS_PATH = "/org/gnome/SessionManager"

    APPLICATION_ID = "quodlibet"
    INHIBIT_REASON = _("Music is playing")

    # Inhibit cookie handed back by the session manager;
    # None while no inhibition is active.
    __cookie = None

    def enabled(self):
        # Plugin switched on mid-playback: inhibit immediately.
        if not app.player.paused:
            self.plugin_on_unpaused()

    def disabled(self):
        # Plugin switched off mid-playback: release the inhibition.
        if not app.player.paused:
            self.plugin_on_paused()

    def plugin_on_unpaused(self):
        xid = dbus.UInt32(get_toplevel_xid())
        flags = dbus.UInt32(InhibitFlags.IDLE)

        try:
            bus = dbus.SessionBus()
            obj = bus.get_object(self.DBUS_NAME, self.DBUS_PATH)
            iface = dbus.Interface(obj, self.DBUS_INTERFACE)
            self.__cookie = iface.Inhibit(
                self.APPLICATION_ID, xid, self.INHIBIT_REASON, flags)
        except dbus.DBusException:
            # Session manager unavailable; carry on without inhibiting.
            pass

    def plugin_on_paused(self):
        if self.__cookie is None:
            return

        try:
            bus = dbus.SessionBus()
            obj = bus.get_object(self.DBUS_NAME, self.DBUS_PATH)
            iface = dbus.Interface(obj, self.DBUS_INTERFACE)
            iface.Uninhibit(self.__cookie)
            self.__cookie = None
        except dbus.DBusException:
            pass
|
KennethNielsen/SoCo | soco/cache.py | Python | mit | 7,367 | 0 | # -*- coding: utf-8 -*-
# pylint: disable=not-context-manager,useless-object-inheritance
# NOTE: The pylint not-content-manager warning is disabled pending the fix of
# a bug in pylint https://github.com/PyCQA/pylint/issues/782
# NOTE: useless-object-inheritance needed for Python 2.x compatability
"""This module contains the classes underlying SoCo's caching system."""
from __future__ import unicode_literals
import threading
from time import time
from . import config
from .compat import dumps
class _BaseCache(object):
"""An abstract base class for the cache."""
# pylint: disable=no-self-use, unused-argument
def __init__(self, *args, **kwargs):
super().__init__()
self._cache = {}
#: `bool`: whether the cache is enabled
self.enabled = True
def put(self, item, *args, **kwargs):
"""Put an item into the cache."""
raise NotImplementedError
def get(self, *args, **kwargs):
"""Get an item from the cache."""
raise NotImplementedError
def delete(self, *args, **kwargs):
"""Delete an item from the cache."""
raise NotImplementedError
def clear(self):
"""Empty the whole cache."""
raise NotImplementedError
class NullCache(_BaseCache):
"""A cache which does nothing.
Useful for debugging.
"""
def put(self, item, *args, **kwargs):
"""Put an item into the cache."""
def get(self, *args, **kwargs):
"""Get an item from the cache."""
return None
def delete(self, *args, **kwargs):
"""Delete an item from the cache."""
def clear(self):
"""Empty the whole cache."""
class TimedCache(_BaseCache):
"""A simple thread-safe cache for caching method return values.
The cache key is generated by from the given ``*args`` and ``**kwargs``.
Items are expired from the cache after a given period of time.
Example:
>>> from time import sleep
>>> cache = TimedCache()
>>> cache.put("item", 'some', kw='args', timeout=3)
>>> # Fetch the item again, by providing the same args and kwargs.
>>> assert cache.get('some', kw='args') == "item"
>>> # Providing different args or kwargs will not return the item.
>>> assert not cache.get('some', 'otherargs') == "item"
>>> # Waiting for less than the provided timeout does not cause the
>>> # item to expire.
>>> sleep(2)
>>> assert cache.get('some', kw='args') == "item"
>>> # But waiting for longer does.
>>> sleep(2)
>>> assert not cache.get('some', kw='args') == "item"
Warning:
At present, the cache can theoretically grow and grow, since entries
are not automatically purged, though in practice this is unlikely
since there are not that many different combinations of arguments in
the places where it is used in SoCo, so not that many different
cache entries will be created. If this becomes a problem,
use a thread and timer to purge the cache, or rewrite this to use
LRU logic!
"""
def __init__(self, default_timeout=0):
"""
Args:
default_timeout (int): The default number of seconds after
which items will be expired.
"""
super().__init__()
#: `int`: The default caching expiry interval in seconds.
self.default_timeout = default_timeout
# A thread lock for the cache
self._cache_lock = threading.Lock()
def get(self, *args, **kwargs):
"""Get an item from the cache for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments.
Returns:
object: The object which has been found in the cache, or `None` if
no unexpired item is found. This means that there is no point
storing an item in the cache if it is `None`.
"""
if not self.enabled:
return None
# Look in the cache to see if there is an unexpired item. If there is
# we can just return the cached result.
cache_key = self.make_key(args, kwargs)
# Lock and load
with self._cache_lock:
if cache_key in self._cache:
expirytime, item = self._cache[cache_key]
if expirytime >= time():
return item
else:
# An expired item is present - delete it
del self._cache[cache_key]
# Nothing found
return None
def put(self, item, *args, **kwargs):
"""Put an item into the cache, for this combination of args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments. If ``timeout`` is specified as one
of the keyword arguments, the item will remain available
for retrieval for ``timeout`` seconds. If ``timeout`` is
`None` or not specified, the ``default_timeout`` for this
cache will be used. Specify a ``timeout`` of 0 (or ensure that
the ``default_timeout`` for this cache is 0) if this item is
not to be cached.
"""
if not self.enabled:
return
# Check for a timeout keyword, store and remove it.
timeout = kwargs.pop("timeout", None)
if timeout is None:
timeout = self.default_timeout
cache_key = self.make_key(args, kwargs)
# Store the item, along with the time at which it will expire
with self._cache_lock:
self._cache[cache_key] = (time() + timeout, item)
def delete(self, *args, **kwargs):
"""Delete an item from the cache for this combination of args and
kwargs."""
cache_key = self.make_key(args, kwargs)
with self._cache_lock:
try:
del self._cache[cache_key]
except KeyError:
pass
def clear(self):
"""Empty the whole cache."""
with self._cache_lock:
self._cache.clear()
@staticmethod
def make_key(*args, **kwargs):
"""Generate a unique, hashable, representation of the args and kwargs.
Args:
*args: any arguments.
**kwargs: any keyword arguments.
Returns:
str: the key.
"""
# This is not entirely straightforward, since args and kwargs may
# contain mutable items and unicode. Possibilities include using
# __repr__, frozensets, and code from Py3's LRU cache. But pickle
# works, and although it is not as fast as some methods, it is good
# enough at the moment
cache_key = dumps((args, kwargs))
return cache_key
class Cache(NullCache):
    """A factory class which returns an instance of a cache subclass.

    A `TimedCache` is returned, unless `config.CACHE_ENABLED` is `False`,
    in which case a `NullCache` will be returned.

    (Two garbled lines in the ``__new__`` body have been restored.)
    """

    def __new__(cls, *args, **kwargs):
        # Choose the concrete class at instantiation time, so toggling
        # config.CACHE_ENABLED affects caches created afterwards.
        if config.CACHE_ENABLED:
            new_cls = TimedCache
        else:
            new_cls = NullCache
        instance = super(Cache, cls).__new__(new_cls)
        # __init__ must be called explicitly: the returned instance is not
        # an instance of Cache, so Python will not invoke it automatically.
        instance.__init__(*args, **kwargs)
        return instance
|
metabrainz/picard | picard/ui/filebrowser.py | Python | gpl-2.0 | 9,720 | 0.001955 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008 Lukáš Lalinský
# Copyright (C) 2008 Hendrik van Antwerpen
# Copyright (C) 2008-2009, 2019-2022 Philipp Wolfer
# Copyright (C) 2011 Andrew Barnert
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013 Wieland Hoffmann
# Copyright (C) 2013, 2017 Sophist-UK
# Copyright (C) 2013, 2018-2021 Laurent Monin
# Copyright (C) 2015 Jeroen Kromwijk
# Copyright (C) 2016-2017 Sambhav Kothari
# Copyright (C) 2018 Vishal Choudhary
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from PyQt5 import (
QtCore,
QtWidgets,
)
from PyQt5.QtCore import QStandardPaths
from picard import log
from picard.config import (
BoolOption,
TextOption,
get_config,
)
from picard.const.sys import IS_MACOS
from picard.formats import supported_formats
from picard.util import find_existing_path
def _macos_find_root_volume():
    """Return the /Volumes/ entry that is the macOS boot volume, or None.

    macOS exposes the root filesystem as a symlink inside /Volumes/; the
    entry whose real path resolves to '/' is the boot volume.
    """
    try:
        # The try block spans the iteration: scandir raises lazily.
        for entry in os.scandir('/Volumes/'):
            if entry.is_symlink() and os.path.realpath(entry.path) == '/':
                return entry.path
    except OSError:
        log.warning('Could not detect macOS boot volume', exc_info=True)
    return None
def _macos_extend_root_volume_path(path):
    """Rewrite *path* to live under the detected boot volume.

    Paths already below /Volumes/ are returned unchanged; if the boot
    volume cannot be detected the original path is returned as-is.
    """
    if path.startswith('/Volumes/'):
        return path
    root_volume = _macos_find_root_volume()
    if not root_volume:
        return path
    relative = path[1:] if path.startswith('/') else path
    return os.path.join(root_volume, relative)
_default_current_browser_path = QStandardPaths.writableLocation(QStandardPaths.StandardLocation.HomeLocation)
if IS_MACOS:
_default_current_browser_path = _macos_extend_root_volume_path(_default_current_browser_path)
class FileBrowser(QtWidgets.QTreeView):
options = [
TextOption("persist", "current_browser_path", _default_current_browser_path),
BoolOption("persist", "show_hidden_files", False),
]
def __init__(self, parent):
super().__init__(parent)
self.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.ExtendedSelection)
self.setDragEnabled(True)
self.load_selected_files_action = QtWidgets.QAction(_("&Load selected files"), self)
self.load_selected_files_action.triggered.connect(self.load_selected_files)
self.addAction(self.load_selected_files_action)
self.move_files_here_action = QtWidgets.QAction(_("&Move tagged files here"), self)
self.move_files_here_action.triggered.connect(self.move_files_here)
self.addAction(self.move_files_here_action)
self.toggle_hidden_action = QtWidgets.QAction(_("Show &hidden files"), self)
self.toggle_hidden_action.setCheckable(True)
config = get_config()
self.toggle_hidden_action.setChecked(config.persist["show_hidden_files"])
self.toggle_hidden_action.toggled.connect(self.show_hidden)
self.addAction(self.toggle_hidden_action)
self.set_as_starting_directory_action = QtWidgets.QAction(_("&Set as starting directory"), self)
self.set_as_starting_directory_action.triggered.connect(self.set_as_starting_directory)
self.addAction(self.set_as_starting_directory_action)
self.doubleClicked.connect(self.load_file_for_item)
self.focused = False
def showEvent(self, event):
if not self.model():
self._set_model()
def contextMenuEvent(self, event):
menu = QtWidgets.QMenu(self)
menu.addAction(self.load_selected_files_action)
menu.addSeparator()
menu.addAction(self.move_files_here_action)
menu.addAction(self.toggle_hidden_action)
menu.addAction(self.set_as_starting_directory_action)
menu.exec_(event.globalPos())
event.accept()
def _set_model(self):
model = QtWidgets.QFileSystemModel()
self.setModel(model)
model.layoutChanged.connect(self._layout_changed)
model.setRootPath("")
self._set_model_filter()
filters = []
for exts, name in supported_formats():
filters.extend("*" + e for e in exts)
model.setNameFilters(filters)
# Hide unsupported files completely
model.setNameFilterDisables(False)
model.sort(0, QtCore.Qt.SortOrder.AscendingOrder)
if IS_MACOS:
self.setRootIndex(model.index("/Volumes"))
header = self.header()
header.hideSection(1)
header.hideSection(2)
header.hideSection(3)
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeMode.ResizeToContents)
header.setStretchLastSection(False)
header.setVisible(False)
def _set_model_filter(self):
config = get_config()
model_filter = QtCore.QDir.Filter.AllDirs | QtCore.QDir.Filter.Files | QtCore.QDir.Filter.Drives | QtCore.QDir.Filter.NoDotAndDotDot
if config.persist["show_hidden_files"]:
model_filter |= QtCore.QDir.Filter.Hidden
self.model().setFilter(model_filter)
def _layout_changed(self):
def scroll():
# XXX The currentIndex seems to change while QFileSystemModel is
# populating itself (so setCurrentIndex in __init__ won't last).
# The time it takes to load varies and there are no signals to find
# out when it's done. As a workaround, keep restoring the state as
# long as the layout is updating, and the user hasn't focused yet.
if not self.focused:
self._restore_state()
self.scrollTo(self.currentIndex())
QtCore.QTimer.singleShot(0, scroll)
def scrollTo(self, index, scrolltype=QtWidgets.QAbstractItemView.ScrollHint.EnsureVisible):
# QTreeView.scrollTo resets the horizontal scroll position to 0.
# Reimplemented to instead scroll to horizontal parent position or keep previous position.
config = get_config()
if index and config.setting['filebrowser_horizontal_autoscroll']:
level = -1
parent = index.parent()
root = self.rootIndex()
while parent.isValid() and parent != root:
parent = parent.parent()
level += 1
pos_x = max(self.indentation() * level, 0)
else:
pos_x = self.horizontalScrollBar().value()
super().scrollTo(index, scrolltype)
self.horizontalScrollBar().setValue(pos_x)
def mousePressEvent(self, event):
super().mousePressEvent(event)
| index = self.indexAt(event.pos())
if index.isValid():
self.selectionModel().setCurrentIndex(index, QtCore.QItemSelectionModel.SelectionFlag.NoUpdate)
def focusInEvent(self, event):
self.focused = True
super().focusInEvent(event)
def | show_hidden(self, state):
config = get_config()
config.persist["show_hidden_files"] = state
self._set_model_filter()
def save_state(self):
indexes = self.selectedIndexes()
if indexes:
path = self.model().filePath(indexes[0])
config = get_config()
config.persist["current_browser_path"] = os.path.normpath(path)
def restore_state(self):
pass
def _restore_state(self):
config = get_config()
if config.setting["starting_directory"]:
path = config.setting["starting_directory_path"]
scrolltype = QtWidgets.QAbstractItemView.ScrollHint.PositionAtTop
else:
path = co |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.